Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6 into for-davem
diff --git a/.mailmap b/.mailmap
index 581fd39..1eba28a 100644
--- a/.mailmap
+++ b/.mailmap
@@ -23,6 +23,7 @@
 Arnaud Patard <arnaud.patard@rtp-net.org>
 Arnd Bergmann <arnd@arndb.de>
 Axel Dyks <xl@xlsigned.net>
+Axel Lin <axel.lin@gmail.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
diff --git a/Documentation/ABI/testing/sysfs-platform-at91 b/Documentation/ABI/testing/sysfs-platform-at91
new file mode 100644
index 0000000..4cc6a86
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-at91
@@ -0,0 +1,25 @@
+What:		/sys/devices/platform/at91_can/net/<iface>/mb0_id
+Date:		January 2011
+KernelVersion:	2.6.38
+Contact:	Marc Kleine-Budde <kernel@pengutronix.de>
+Description:
+		Value representing the can_id of mailbox 0.
+
+		Default: 0x7ff (standard frame)
+
+		Due to a chip bug (errata 50.2.6.3 & 50.3.5.3 in
+		"AT91SAM9263 Preliminary 6249H-ATARM-27-Jul-09") the
+		contents of mailbox 0 may be sent under certain
+		conditions (even if disabled or in rx mode).
+
+		The workaround in the errata suggests not using the
+		mailbox and loading it with an unused identifier.
+
+		In order to use an extended can_id add the
+		CAN_EFF_FLAG (0x80000000U) to the can_id. Example:
+
+		- standard id 0x7ff:
+		echo 0x7ff      > /sys/class/net/can0/mb0_id
+
+		- extended id 0x1fffffff:
+		echo 0x9fffffff > /sys/class/net/can0/mb0_id
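The extended-id value in the second example is simply the 29-bit
identifier OR'ed with CAN_EFF_FLAG; a quick, hedged shell check of that
arithmetic (bash assumed, interface name can0 taken from the example
above):

	$ printf '0x%x\n' $(( 0x1fffffff | 0x80000000 ))
	0x9fffffff
	$ cat /sys/class/net/can0/mb0_id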
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 35447e0..36f63d4 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -217,8 +217,8 @@
   <chapter id="uart16x50">
      <title>16x50 UART Driver</title>
 !Iinclude/linux/serial_core.h
-!Edrivers/serial/serial_core.c
-!Edrivers/serial/8250.c
+!Edrivers/tty/serial/serial_core.c
+!Edrivers/tty/serial/8250.c
   </chapter>
 
   <chapter id="fbdev">
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 2861055..c279158 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -73,8 +73,8 @@
       services.
     </para>
     <para>
-      The core of every DRM driver is struct drm_device.  Drivers
-      will typically statically initialize a drm_device structure,
+      The core of every DRM driver is struct drm_driver.  Drivers
+      will typically statically initialize a drm_driver structure,
       then pass it to drm_init() at load time.
     </para>
 
@@ -84,7 +84,7 @@
     <title>Driver initialization</title>
     <para>
       Before calling the DRM initialization routines, the driver must
-      first create and fill out a struct drm_device structure.
+      first create and fill out a struct drm_driver structure.
     </para>
     <programlisting>
       static struct drm_driver driver = {
diff --git a/Documentation/DocBook/dvb/dvbapi.xml b/Documentation/DocBook/dvb/dvbapi.xml
index e3a97fd..ad8678d 100644
--- a/Documentation/DocBook/dvb/dvbapi.xml
+++ b/Documentation/DocBook/dvb/dvbapi.xml
@@ -28,7 +28,7 @@
 	<holder>Convergence GmbH</holder>
 </copyright>
 <copyright>
-	<year>2009-2010</year>
+	<year>2009-2011</year>
 	<holder>Mauro Carvalho Chehab</holder>
 </copyright>
 
diff --git a/Documentation/DocBook/media.tmpl b/Documentation/DocBook/media.tmpl
index f11048d..a99088a 100644
--- a/Documentation/DocBook/media.tmpl
+++ b/Documentation/DocBook/media.tmpl
@@ -28,7 +28,7 @@
 <title>LINUX MEDIA INFRASTRUCTURE API</title>
 
 <copyright>
-	<year>2009-2010</year>
+	<year>2009-2011</year>
 	<holder>LinuxTV Developers</holder>
 </copyright>
 
@@ -86,7 +86,7 @@
 </author>
 </authorgroup>
 <copyright>
-	<year>2009-2010</year>
+	<year>2009-2011</year>
 	<holder>Mauro Carvalho Chehab</holder>
 </copyright>
 
diff --git a/Documentation/DocBook/v4l/dev-rds.xml b/Documentation/DocBook/v4l/dev-rds.xml
index 360d273..2427f54 100644
--- a/Documentation/DocBook/v4l/dev-rds.xml
+++ b/Documentation/DocBook/v4l/dev-rds.xml
@@ -75,6 +75,7 @@
   </section>
 
   <section>
+    <title>RDS datastructures</title>
     <table frame="none" pgwide="1" id="v4l2-rds-data">
       <title>struct
 <structname>v4l2_rds_data</structname></title>
@@ -129,10 +130,11 @@
 
     <table frame="none" pgwide="1" id="v4l2-rds-block-codes">
       <title>Block defines</title>
-      <tgroup cols="3">
+      <tgroup cols="4">
 	<colspec colname="c1" colwidth="1*" />
 	<colspec colname="c2" colwidth="1*" />
-	<colspec colname="c3" colwidth="5*" />
+	<colspec colname="c3" colwidth="1*" />
+	<colspec colname="c4" colwidth="5*" />
 	<tbody valign="top">
 	  <row>
 	    <entry>V4L2_RDS_BLOCK_MSK</entry>
diff --git a/Documentation/DocBook/v4l/v4l2.xml b/Documentation/DocBook/v4l/v4l2.xml
index 839e93e..9288af9 100644
--- a/Documentation/DocBook/v4l/v4l2.xml
+++ b/Documentation/DocBook/v4l/v4l2.xml
@@ -100,6 +100,7 @@
       <year>2008</year>
       <year>2009</year>
       <year>2010</year>
+      <year>2011</year>
       <holder>Bill Dirks, Michael H. Schimek, Hans Verkuil, Martin
 Rubli, Andy Walls, Muralidharan Karicheri, Mauro Carvalho Chehab</holder>
     </copyright>
@@ -381,7 +382,7 @@
 </partinfo>
 
 <title>Video for Linux Two API Specification</title>
- <subtitle>Revision 2.6.33</subtitle>
+ <subtitle>Revision 2.6.38</subtitle>
 
   <chapter id="common">
     &sub-common;
diff --git a/Documentation/powerpc/dts-bindings/fsl/sata.txt b/Documentation/devicetree/bindings/ata/fsl-sata.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/sata.txt
rename to Documentation/devicetree/bindings/ata/fsl-sata.txt
diff --git a/Documentation/powerpc/dts-bindings/eeprom.txt b/Documentation/devicetree/bindings/eeprom.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/eeprom.txt
rename to Documentation/devicetree/bindings/eeprom.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt b/Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/8xxx_gpio.txt
rename to Documentation/devicetree/bindings/gpio/8xxx_gpio.txt
diff --git a/Documentation/powerpc/dts-bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/gpio/gpio.txt
rename to Documentation/devicetree/bindings/gpio/gpio.txt
diff --git a/Documentation/powerpc/dts-bindings/gpio/led.txt b/Documentation/devicetree/bindings/gpio/led.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/gpio/led.txt
rename to Documentation/devicetree/bindings/gpio/led.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/i2c.txt b/Documentation/devicetree/bindings/i2c/fsl-i2c.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/i2c.txt
rename to Documentation/devicetree/bindings/i2c/fsl-i2c.txt
diff --git a/Documentation/powerpc/dts-bindings/marvell.txt b/Documentation/devicetree/bindings/marvell.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/marvell.txt
rename to Documentation/devicetree/bindings/marvell.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/esdhc.txt
rename to Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
diff --git a/Documentation/powerpc/dts-bindings/mmc-spi-slot.txt b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/mmc-spi-slot.txt
rename to Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/upm-nand.txt b/Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/upm-nand.txt
rename to Documentation/devicetree/bindings/mtd/fsl-upm-nand.txt
diff --git a/Documentation/powerpc/dts-bindings/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/mtd-physmap.txt
rename to Documentation/devicetree/bindings/mtd/mtd-physmap.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/can.txt b/Documentation/devicetree/bindings/net/can/mpc5xxx-mscan.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/can.txt
rename to Documentation/devicetree/bindings/net/can/mpc5xxx-mscan.txt
diff --git a/Documentation/powerpc/dts-bindings/can/sja1000.txt b/Documentation/devicetree/bindings/net/can/sja1000.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/can/sja1000.txt
rename to Documentation/devicetree/bindings/net/can/sja1000.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/tsec.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/tsec.txt
rename to Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
diff --git a/Documentation/powerpc/dts-bindings/gpio/mdio.txt b/Documentation/devicetree/bindings/net/mdio-gpio.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/gpio/mdio.txt
rename to Documentation/devicetree/bindings/net/mdio-gpio.txt
diff --git a/Documentation/powerpc/dts-bindings/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/phy.txt
rename to Documentation/devicetree/bindings/net/phy.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/83xx-512x-pci.txt b/Documentation/devicetree/bindings/pci/83xx-512x-pci.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/83xx-512x-pci.txt
rename to Documentation/devicetree/bindings/pci/83xx-512x-pci.txt
diff --git a/Documentation/powerpc/dts-bindings/4xx/cpm.txt b/Documentation/devicetree/bindings/powerpc/4xx/cpm.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/4xx/cpm.txt
rename to Documentation/devicetree/bindings/powerpc/4xx/cpm.txt
diff --git a/Documentation/powerpc/dts-bindings/4xx/emac.txt b/Documentation/devicetree/bindings/powerpc/4xx/emac.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/4xx/emac.txt
rename to Documentation/devicetree/bindings/powerpc/4xx/emac.txt
diff --git a/Documentation/powerpc/dts-bindings/4xx/ndfc.txt b/Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/4xx/ndfc.txt
rename to Documentation/devicetree/bindings/powerpc/4xx/ndfc.txt
diff --git a/Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt b/Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/4xx/ppc440spe-adma.txt
rename to Documentation/devicetree/bindings/powerpc/4xx/ppc440spe-adma.txt
diff --git a/Documentation/powerpc/dts-bindings/4xx/reboot.txt b/Documentation/devicetree/bindings/powerpc/4xx/reboot.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/4xx/reboot.txt
rename to Documentation/devicetree/bindings/powerpc/4xx/reboot.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/board.txt b/Documentation/devicetree/bindings/powerpc/fsl/board.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/board.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/board.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/brg.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/brg.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/brg.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/brg.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/i2c.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/i2c.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/i2c.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/i2c.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/pic.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/pic.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/pic.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/pic.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/usb.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/usb.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/cpm/usb.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/cpm/usb.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/gpio.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/gpio.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/gpio.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/network.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/network.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/network.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/network.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/firmware.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/firmware.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/firmware.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/firmware.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/par_io.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/par_io.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/par_io.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/par_io.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/pincfg.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/pincfg.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/pincfg.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/pincfg.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/ucc.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/ucc.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/ucc.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/usb.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/usb.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe/usb.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/qe/usb.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/serial.txt b/Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/serial.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/cpm_qe/serial.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/cpm_qe/serial.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/diu.txt b/Documentation/devicetree/bindings/powerpc/fsl/diu.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/diu.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/diu.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/dma.txt b/Documentation/devicetree/bindings/powerpc/fsl/dma.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/dma.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/dma.txt
diff --git a/Documentation/powerpc/dts-bindings/ecm.txt b/Documentation/devicetree/bindings/powerpc/fsl/ecm.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/ecm.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/ecm.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/gtm.txt b/Documentation/devicetree/bindings/powerpc/fsl/gtm.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/gtm.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/gtm.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/guts.txt b/Documentation/devicetree/bindings/powerpc/fsl/guts.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/guts.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/guts.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/lbc.txt b/Documentation/devicetree/bindings/powerpc/fsl/lbc.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/lbc.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/lbc.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/mcm.txt b/Documentation/devicetree/bindings/powerpc/fsl/mcm.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/mcm.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/mcm.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/mcu-mpc8349emitx.txt b/Documentation/devicetree/bindings/powerpc/fsl/mcu-mpc8349emitx.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/mcu-mpc8349emitx.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/mcu-mpc8349emitx.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpc5121-psc.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/mpc5121-psc.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/mpc5121-psc.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/mpc5200.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpic.txt b/Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/mpic.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/msi-pic.txt b/Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/msi-pic.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/msi-pic.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/pmc.txt b/Documentation/devicetree/bindings/powerpc/fsl/pmc.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/pmc.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/pmc.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/sec.txt b/Documentation/devicetree/bindings/powerpc/fsl/sec.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/sec.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/sec.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/ssi.txt b/Documentation/devicetree/bindings/powerpc/fsl/ssi.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/ssi.txt
rename to Documentation/devicetree/bindings/powerpc/fsl/ssi.txt
diff --git a/Documentation/powerpc/dts-bindings/nintendo/gamecube.txt b/Documentation/devicetree/bindings/powerpc/nintendo/gamecube.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/nintendo/gamecube.txt
rename to Documentation/devicetree/bindings/powerpc/nintendo/gamecube.txt
diff --git a/Documentation/powerpc/dts-bindings/nintendo/wii.txt b/Documentation/devicetree/bindings/powerpc/nintendo/wii.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/nintendo/wii.txt
rename to Documentation/devicetree/bindings/powerpc/nintendo/wii.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/spi.txt b/Documentation/devicetree/bindings/spi/fsl-spi.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/spi.txt
rename to Documentation/devicetree/bindings/spi/fsl-spi.txt
diff --git a/Documentation/powerpc/dts-bindings/spi-bus.txt b/Documentation/devicetree/bindings/spi/spi-bus.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/spi-bus.txt
rename to Documentation/devicetree/bindings/spi/spi-bus.txt
diff --git a/Documentation/powerpc/dts-bindings/fsl/usb.txt b/Documentation/devicetree/bindings/usb/fsl-usb.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/fsl/usb.txt
rename to Documentation/devicetree/bindings/usb/fsl-usb.txt
diff --git a/Documentation/powerpc/dts-bindings/usb-ehci.txt b/Documentation/devicetree/bindings/usb/usb-ehci.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/usb-ehci.txt
rename to Documentation/devicetree/bindings/usb/usb-ehci.txt
diff --git a/Documentation/powerpc/dts-bindings/xilinx.txt b/Documentation/devicetree/bindings/xilinx.txt
similarity index 100%
rename from Documentation/powerpc/dts-bindings/xilinx.txt
rename to Documentation/devicetree/bindings/xilinx.txt
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt
similarity index 90%
rename from Documentation/powerpc/booting-without-of.txt
rename to Documentation/devicetree/booting-without-of.txt
index 7400d75..28b1c9d 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/devicetree/booting-without-of.txt
@@ -13,7 +13,6 @@
 
   I - Introduction
     1) Entry point for arch/powerpc
-    2) Board support
 
   II - The DT block format
     1) Header
@@ -41,13 +40,6 @@
   VI - System-on-a-chip devices and nodes
     1) Defining child nodes of an SOC
     2) Representing devices without a current OF specification
-      a) PHY nodes
-      b) Interrupt controllers
-      c) 4xx/Axon EMAC ethernet nodes
-      d) Xilinx IP cores
-      e) USB EHCI controllers
-      f) MDIO on GPIOs
-      g) SPI busses
 
   VII - Specifying interrupt information for devices
     1) interrupts property
@@ -123,7 +115,7 @@
 I - Introduction
 ================
 
-During the recent development of the Linux/ppc64 kernel, and more
+During the development of the Linux/ppc64 kernel, and more
 specifically, the addition of new platform types outside of the old
 IBM pSeries/iSeries pair, it was decided to enforce some strict rules
 regarding the kernel entry and bootloader <-> kernel interfaces, in
@@ -146,7 +138,7 @@
 create a node for every PCI device in the system. It is a requirement
 to have a node for PCI host bridges in order to provide interrupt
 routing informations and memory/IO ranges, among others. It is also
-recommended to define nodes for on chip devices and other busses that
+recommended to define nodes for on chip devices and other buses that
 don't specifically fit in an existing OF specification. This creates a
 great flexibility in the way the kernel can then probe those and match
 drivers to device, without having to hard code all sorts of tables. It
@@ -158,7 +150,7 @@
 1) Entry point for arch/powerpc
 -------------------------------
 
-   There is one and one single entry point to the kernel, at the start
+   There is one single entry point to the kernel, at the start
    of the kernel image. That entry point supports two calling
    conventions:
 
@@ -210,12 +202,6 @@
         with all CPUs. The way to do that with method b) will be
         described in a later revision of this document.
 
-
-2) Board support
-----------------
-
-64-bit kernels:
-
    Board supports (platforms) are not exclusive config options. An
    arbitrary set of board supports can be built in a single kernel
    image. The kernel will "know" what set of functions to use for a
@@ -234,48 +220,11 @@
         containing the various callbacks that the generic code will
         use to get to your platform specific code
 
-        c) Add a reference to your "ppc_md" structure in the
-        "machines" table in arch/powerpc/kernel/setup_64.c if you are
-        a 64-bit platform.
-
-        d) request and get assigned a platform number (see PLATFORM_*
-        constants in arch/powerpc/include/asm/processor.h
-
-32-bit embedded kernels:
-
-  Currently, board support is essentially an exclusive config option.
-  The kernel is configured for a single platform.  Part of the reason
-  for this is to keep kernels on embedded systems small and efficient;
-  part of this is due to the fact the code is already that way. In the
-  future, a kernel may support multiple platforms, but only if the
+  A kernel image may support multiple platforms, but only if the
   platforms feature the same core architecture.  A single kernel build
   cannot support both configurations with Book E and configurations
   with classic Powerpc architectures.
 
-  32-bit embedded platforms that are moved into arch/powerpc using a
-  flattened device tree should adopt the merged tree practice of
-  setting ppc_md up dynamically, even though the kernel is currently
-  built with support for only a single platform at a time.  This allows
-  unification of the setup code, and will make it easier to go to a
-  multiple-platform-support model in the future.
-
-NOTE: I believe the above will be true once Ben's done with the merge
-of the boot sequences.... someone speak up if this is wrong!
-
-  To add a 32-bit embedded platform support, follow the instructions
-  for 64-bit platforms above, with the exception that the Kconfig
-  option should be set up such that the kernel builds exclusively for
-  the platform selected.  The processor type for the platform should
-  enable another config option to select the specific board
-  supported.
-
-NOTE: If Ben doesn't merge the setup files, may need to change this to
-point to setup_32.c
-
-
-   I will describe later the boot process and various callbacks that
-   your platform should implement.
-
 
 II - The DT block format
 ========================
@@ -300,8 +249,8 @@
 1) Header
 ---------
 
-   The kernel is entered with r3 pointing to an area of memory that is
-   roughly described in arch/powerpc/include/asm/prom.h by the structure
+   The kernel is passed the physical address pointing to an area of memory
+   that is roughly described in include/linux/of_fdt.h by the structure
    boot_param_header:
 
 struct boot_param_header {
@@ -339,7 +288,7 @@
    All values in this header are in big endian format, the various
    fields in this header are defined more precisely below. All
    "offset" values are in bytes from the start of the header; that is
-   from the value of r3.
+   from the physical base address of the device tree block.
 
    - magic
 
@@ -437,7 +386,7 @@
 
 
              ------------------------------
-       r3 -> |  struct boot_param_header  |
+     base -> |  struct boot_param_header  |
              ------------------------------
              |      (alignment gap) (*)   |
              ------------------------------
@@ -457,7 +406,7 @@
       -----> ------------------------------
       |
       |
-      --- (r3 + totalsize)
+      --- (base + totalsize)
 
   (*) The alignment gaps are not necessarily present; their presence
       and size are dependent on the various alignment requirements of
@@ -500,7 +449,7 @@
 the device-tree. More details about the actual format of these will be
 below.
 
-The kernel powerpc generic code does not make any formal use of the
+The kernel generic code does not make any formal use of the
 unit address (though some board support code may do) so the only real
 requirement here for the unit address is to ensure uniqueness of
 the node unit name at a given level of the tree. Nodes with no notion
@@ -518,20 +467,21 @@
 
 Every node which actually represents an actual device (that is, a node
 which isn't only a virtual "container" for more nodes, like "/cpus"
-is) is also required to have a "device_type" property indicating the
-type of node .
+is) is also required to have a "compatible" property indicating the
+specific hardware and an optional list of devices it is fully
+backwards compatible with.
 
 Finally, every node that can be referenced from a property in another
-node is required to have a "linux,phandle" property. Real open
-firmware implementations provide a unique "phandle" value for every
-node that the "prom_init()" trampoline code turns into
-"linux,phandle" properties. However, this is made optional if the
-flattened device tree is used directly. An example of a node
+node is required to have either a "phandle" or a "linux,phandle"
+property. Real Open Firmware implementations provide a unique
+"phandle" value for every node that the "prom_init()" trampoline code
+turns into "linux,phandle" properties. However, this is made optional
+if the flattened device tree is used directly. An example of a node
 referencing another node via "phandle" is when laying out the
 interrupt tree which will be described in a further version of this
 document.
 
-This "linux, phandle" property is a 32-bit value that uniquely
+The "phandle" property is a 32-bit value that uniquely
 identifies a node. You are free to use whatever values or system of
 values, internal pointers, or whatever to generate these, the only
 requirement is that every node for which you provide that property has
@@ -694,7 +644,7 @@
 while the top cell contains address space indication, flags, and pci
 bus & device numbers.
 
-For busses that support dynamic allocation, it's the accepted practice
+For buses that support dynamic allocation, it's the accepted practice
 to then not provide the address in "reg" (keep it 0) though while
 providing a flag indicating the address is dynamically allocated, and
 then, to provide a separate "assigned-addresses" property that
@@ -711,7 +661,7 @@
 The "reg" property only defines addresses and sizes (if #size-cells is
 non-0) within a given bus. In order to translate addresses upward
 (that is into parent bus addresses, and possibly into CPU physical
-addresses), all busses must contain a "ranges" property. If the
+addresses), all buses must contain a "ranges" property. If the
 "ranges" property is missing at a given level, it's assumed that
 translation isn't possible, i.e., the registers are not visible on the
 parent bus.  The format of the "ranges" property for a bus is a list
@@ -727,9 +677,9 @@
 PCI<->ISA bridge, that would be a PCI address. It defines the base
 address in the parent bus where the beginning of that range is mapped.
 
-For a new 64-bit powerpc board, I recommend either the 2/2 format or
+For new 64-bit board support, I recommend either the 2/2 format or
 Apple's 2/1 format which is slightly more compact since sizes usually
-fit in a single 32-bit word.   New 32-bit powerpc boards should use a
+fit in a single 32-bit word.   New 32-bit board support should use a
 1/1 format, unless the processor supports physical addresses greater
 than 32-bits, in which case a 2/1 format is recommended.
 
@@ -754,7 +704,7 @@
 While earlier users of Open Firmware like OldWorld macintoshes tended
 to use the actual device name for the "name" property, it's nowadays
 considered a good practice to use a name that is closer to the device
-class (often equal to device_type). For example, nowadays, ethernet
+class (often equal to device_type). For example, nowadays, Ethernet
 controllers are named "ethernet", an additional "model" property
 defining precisely the chip type/model, and "compatible" property
 defining the family in case a single driver can driver more than one
@@ -772,7 +722,7 @@
 4) Note about node and property names and character set
 -------------------------------------------------------
 
-While open firmware provides more flexible usage of 8859-1, this
+While Open Firmware provides more flexible usage of 8859-1, this
 specification enforces more strict rules. Nodes and properties should
 be comprised only of ASCII characters 'a' to 'z', '0' to
 '9', ',', '.', '_', '+', '#', '?', and '-'. Node names additionally
@@ -792,7 +742,7 @@
 --------------------------------
   These are all that are currently required. However, it is strongly
   recommended that you expose PCI host bridges as documented in the
-  PCI binding to open firmware, and your interrupt tree as documented
+  PCI binding to Open Firmware, and your interrupt tree as documented
   in OF interrupt tree specification.
 
   a) The root node
@@ -802,20 +752,12 @@
     - model : this is your board name/model
     - #address-cells : address representation for "root" devices
     - #size-cells: the size representation for "root" devices
-    - device_type : This property shouldn't be necessary. However, if
-      you decide to create a device_type for your root node, make sure it
-      is _not_ "chrp" unless your platform is a pSeries or PAPR compliant
-      one for 64-bit, or a CHRP-type machine for 32-bit as this will
-      matched by the kernel this way.
-
-  Additionally, some recommended properties are:
-
     - compatible : the board "family" generally finds its way here,
       for example, if you have 2 board models with a similar layout,
       that typically get driven by the same platform code in the
-      kernel, you would use a different "model" property but put a
-      value in "compatible". The kernel doesn't directly use that
-      value but it is generally useful.
+      kernel, you would specify the exact board model in the
+      compatible property followed by an entry that represents the SoC
+      model.
 
   The root node is also generally where you add additional properties
   specific to your board like the serial number if any, that sort of
@@ -841,8 +783,11 @@
 
   So under /cpus, you are supposed to create a node for every CPU on
   the machine. There is no specific restriction on the name of the
-  CPU, though It's common practice to call it PowerPC,<name>. For
+  CPU, though it's common to call it <architecture>,<core>. For
   example, Apple uses PowerPC,G5 while IBM uses PowerPC,970FX.
+  However, the Generic Names convention suggests that it would be
+  better to simply use 'cpu' for each cpu node and use the compatible
+  property to identify the specific cpu core.
 
   Required properties:
 
@@ -923,7 +868,7 @@
 
   e) The /chosen node
 
-  This node is a bit "special". Normally, that's where open firmware
+  This node is a bit "special". Normally, that's where Open Firmware
   puts some variable environment information, like the arguments, or
   the default input/output devices.
 
@@ -940,11 +885,7 @@
       console device if any. Typically, if you have serial devices on
       your board, you may want to put the full path to the one set as
       the default console in the firmware here, for the kernel to pick
-      it up as its own default console. If you look at the function
-      set_preferred_console() in arch/ppc64/kernel/setup.c, you'll see
-      that the kernel tries to find out the default console and has
-      knowledge of various types like 8250 serial ports. You may want
-      to extend this function to add your own.
+      it up as its own default console.
 
   Note that u-boot creates and fills in the chosen node for platforms
   that use it.
@@ -955,23 +896,23 @@
 
   f) the /soc<SOCname> node
 
-  This node is used to represent a system-on-a-chip (SOC) and must be
-  present if the processor is a SOC. The top-level soc node contains
-  information that is global to all devices on the SOC. The node name
-  should contain a unit address for the SOC, which is the base address
-  of the memory-mapped register set for the SOC. The name of an soc
+  This node is used to represent a system-on-a-chip (SoC) and must be
+  present if the processor is a SoC. The top-level soc node contains
+  information that is global to all devices on the SoC. The node name
+  should contain a unit address for the SoC, which is the base address
+  of the memory-mapped register set for the SoC. The name of an SoC
   node should start with "soc", and the remainder of the name should
   represent the part number for the soc.  For example, the MPC8540's
   soc node would be called "soc8540".
 
   Required properties:
 
-    - device_type : Should be "soc"
     - ranges : Should be defined as specified in 1) to describe the
-      translation of SOC addresses for memory mapped SOC registers.
-    - bus-frequency: Contains the bus frequency for the SOC node.
+      translation of SoC addresses for memory mapped SoC registers.
+    - bus-frequency: Contains the bus frequency for the SoC node.
       Typically, the value of this field is filled in by the boot
       loader.
+    - compatible : Exact model of the SoC
 
 
   Recommended properties:
@@ -1155,12 +1096,13 @@
 
   - An example of code for iterating nodes & retrieving properties
     directly from the flattened tree format can be found in the kernel
-    file arch/ppc64/kernel/prom.c, look at scan_flat_dt() function,
+    file drivers/of/fdt.c.  Look at the of_scan_flat_dt() function,
     its usage in early_init_devtree(), and the corresponding various
     early_init_dt_scan_*() callbacks. That code can be re-used in a
     GPL bootloader, and as the author of that code, I would be happy
     to discuss possible free licensing to any vendor who wishes to
     integrate all or part of this code into a non-GPL bootloader.
+    (reference needed; who is 'I' here? ---gcl Jan 31, 2011)
 
 
 
@@ -1203,18 +1145,19 @@
 2) Representing devices without a current OF specification
 ----------------------------------------------------------
 
-Currently, there are many devices on SOCs that do not have a standard
-representation pre-defined as part of the open firmware
-specifications, mainly because the boards that contain these SOCs are
-not currently booted using open firmware.   This section contains
-descriptions for the SOC devices for which new nodes have been
-defined; this list will expand as more and more SOC-containing
-platforms are moved over to use the flattened-device-tree model.
+Currently, there are many devices on SoCs that do not have a standard
+representation defined as part of the Open Firmware specifications,
+mainly because the boards that contain these SoCs are not currently
+booted using Open Firmware.  Binding documentation for new devices
+should be added to the Documentation/devicetree/bindings directory.
+That directory will expand as device tree support is added to more and
+more SoCs.
+
 
 VII - Specifying interrupt information for devices
 ===================================================
 
-The device tree represents the busses and devices of a hardware
+The device tree represents the buses and devices of a hardware
 system in a form similar to the physical bus topology of the
 hardware.
 
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index ab27485..d6f5255 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -368,14 +368,6 @@
 
 -----------------------------
 
-What:	__do_IRQ all in one fits nothing interrupt handler
-When:	2.6.32
-Why:	__do_IRQ was kept for easy migration to the type flow handlers.
-	More than two years of migration time is enough.
-Who:	Thomas Gleixner <tglx@linutronix.de>
-
------------------------------
-
 What:	fakephp and associated sysfs files in /sys/bus/pci/slots/
 When:	2011
 Why:	In 2.6.27, the semantics of /sys/bus/pci/slots was redefined to
@@ -622,3 +614,26 @@
 Who:	Jean Delvare <khali@linux-fr.org>
 
 ----------------------------
+
+What:	xt_connlimit rev 0
+When:	2012
+Who:	Jan Engelhardt <jengelh@medozas.de>
+Files:	net/netfilter/xt_connlimit.c
+
+----------------------------
+
+What:	noswapaccount kernel command line parameter
+When:	2.6.40
+Why:	The original implementation of the memsw feature enabled by
+	CONFIG_CGROUP_MEM_RES_CTLR_SWAP could be disabled by the noswapaccount
+	kernel parameter (introduced in 2.6.29-rc1). Later on, this decision
+	turned out not to be ideal because we cannot have the feature compiled
+	in and disabled by default while letting only interested users enable
+	it (e.g. general distribution kernels might need it). Therefore we
+	added the swapaccount[=0|1] parameter (introduced in 2.6.37) which
+	provides both possibilities. If we remove noswapaccount we will have
+	fewer command line parameters with the same functionality and we
+	can also clean up the parameter handling a bit.
+Who:	Michal Hocko <mhocko@suse.cz>
+
+----------------------------
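The swapaccount parameter described above is a boot-time option; a
minimal sketch of checking and setting it, assuming a GRUB-style
bootloader (the exact config file is distribution-specific):

	$ cat /proc/cmdline          # see what the running kernel booted with
	# append "swapaccount=1" (or "=0") to the kernel line in the
	# bootloader configuration, e.g. /boot/grub/grub.cfg, then reboot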
diff --git a/Documentation/filesystems/ntfs.txt b/Documentation/filesystems/ntfs.txt
index 6ef8cf3..933bc66 100644
--- a/Documentation/filesystems/ntfs.txt
+++ b/Documentation/filesystems/ntfs.txt
@@ -460,6 +460,8 @@
 2.1.30:
 	- Fix writev() (it kept writing the first segment over and over again
 	  instead of moving onto subsequent segments).
+	- Fix crash in ntfs_mft_record_alloc() when mapping the new extent mft
+	  record failed.
 2.1.29:
 	- Fix a deadlock when mounting read-write.
 2.1.28:
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b72e071..89835a4 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -43,11 +43,11 @@
 	AVR32	AVR32 architecture is enabled.
 	AX25	Appropriate AX.25 support is enabled.
 	BLACKFIN Blackfin architecture is enabled.
+	DRM	Direct Rendering Management support is enabled.
+	DYNAMIC_DEBUG Build in debug messages and enable them at runtime
 	EDD	BIOS Enhanced Disk Drive Services (EDD) is enabled
 	EFI	EFI Partitioning (GPT) is enabled
 	EIDE	EIDE/ATAPI support is enabled.
-	DRM	Direct Rendering Management support is enabled.
-	DYNAMIC_DEBUG Build in debug messages and enable them at runtime
 	FB	The frame buffer device is enabled.
 	GCOV	GCOV profiling is enabled.
 	HW	Appropriate hardware is enabled.
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index dc73bc54..d9da7e1 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -39,6 +39,9 @@
 #include <limits.h>
 #include <stddef.h>
 #include <signal.h>
+#include <pwd.h>
+#include <grp.h>
+
 #include <linux/virtio_config.h>
 #include <linux/virtio_net.h>
 #include <linux/virtio_blk.h>
@@ -298,20 +301,27 @@
 
 	/*
 	 * We use a private mapping (ie. if we write to the page, it will be
-	 * copied).
+	 * copied). We allocate two extra PROT_NONE pages to act as guard
+	 * pages against read/write attempts that exceed the allocated space.
 	 */
-	addr = mmap(NULL, getpagesize() * num,
-		    PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, fd, 0);
+	addr = mmap(NULL, getpagesize() * (num+2),
+		    PROT_NONE, MAP_PRIVATE, fd, 0);
+
 	if (addr == MAP_FAILED)
 		err(1, "Mmapping %u pages of /dev/zero", num);
 
+	if (mprotect(addr + getpagesize(), getpagesize() * num,
+		     PROT_READ|PROT_WRITE) == -1)
+		err(1, "mprotect rw %u pages failed", num);
+
 	/*
 	 * One neat mmap feature is that you can close the fd, and it
 	 * stays mapped.
 	 */
 	close(fd);
 
-	return addr;
+	/* Return address after PROT_NONE page */
+	return addr + getpagesize();
 }
 
 /* Get some more pages for a device. */
@@ -343,7 +353,7 @@
 	 * done to it.  This allows us to share untouched memory between
 	 * Guests.
 	 */
-	if (mmap(addr, len, PROT_READ|PROT_WRITE|PROT_EXEC,
+	if (mmap(addr, len, PROT_READ|PROT_WRITE,
 		 MAP_FIXED|MAP_PRIVATE, fd, offset) != MAP_FAILED)
 		return;
 
@@ -573,10 +583,10 @@
 			    unsigned int line)
 {
 	/*
-	 * We have to separately check addr and addr+size, because size could
-	 * be huge and addr + size might wrap around.
+	 * Check whether the requested address and size exceed the allocated
+	 * memory, or addr + size wraps around.
 	 */
-	if (addr >= guest_limit || addr + size >= guest_limit)
+	if ((addr + size) > guest_limit || (addr + size) < addr)
 		errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr);
 	/*
 	 * We return a pointer for the caller's convenience, now we know it's
@@ -1872,6 +1882,8 @@
 	{ "block", 1, NULL, 'b' },
 	{ "rng", 0, NULL, 'r' },
 	{ "initrd", 1, NULL, 'i' },
+	{ "username", 1, NULL, 'u' },
+	{ "chroot", 1, NULL, 'c' },
 	{ NULL },
 };
 static void usage(void)
@@ -1894,6 +1906,12 @@
 	/* If they specify an initrd file to load. */
 	const char *initrd_name = NULL;
 
+	/* Password structure for initgroups/setres[gu]id */
+	struct passwd *user_details = NULL;
+
+	/* Directory to chroot to */
+	char *chroot_path = NULL;
+
 	/* Save the args: we "reboot" by execing ourselves again. */
 	main_args = argv;
 
@@ -1950,6 +1968,14 @@
 		case 'i':
 			initrd_name = optarg;
 			break;
+		case 'u':
+			user_details = getpwnam(optarg);
+			if (!user_details)
+				err(1, "getpwnam failed, incorrect username?");
+			break;
+		case 'c':
+			chroot_path = optarg;
+			break;
 		default:
 			warnx("Unknown argument %s", argv[optind]);
 			usage();
@@ -2021,6 +2047,37 @@
 	/* If we exit via err(), this kills all the threads, restores tty. */
 	atexit(cleanup_devices);
 
+	/* If requested, chroot to a directory */
+	if (chroot_path) {
+		if (chroot(chroot_path) != 0)
+			err(1, "chroot(\"%s\") failed", chroot_path);
+
+		if (chdir("/") != 0)
+			err(1, "chdir(\"/\") failed");
+
+		verbose("chroot done\n");
+	}
+
+	/* If requested, drop privileges */
+	if (user_details) {
+		uid_t u;
+		gid_t g;
+
+		u = user_details->pw_uid;
+		g = user_details->pw_gid;
+
+		if (initgroups(user_details->pw_name, g) != 0)
+			err(1, "initgroups failed");
+
+		if (setresgid(g, g, g) != 0)
+			err(1, "setresgid failed");
+
+		if (setresuid(u, u, u) != 0)
+			err(1, "setresuid failed");
+
+		verbose("Dropping privileges completed\n");
+	}
+
 	/* Finally, run the Guest.  This doesn't return. */
 	run_guest();
 }
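A hedged usage sketch of the two new launcher switches; apart from
--username and --chroot, the memory size, kernel image and guest
command line below are assumptions, not taken from this patch:

	$ sudo ./lguest --username=nobody --chroot=/var/empty \
	        --rng 256 vmlinux root=/dev/vda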
diff --git a/Documentation/lguest/lguest.txt b/Documentation/lguest/lguest.txt
index 6ccaf8e..dad9997 100644
--- a/Documentation/lguest/lguest.txt
+++ b/Documentation/lguest/lguest.txt
@@ -117,6 +117,11 @@
     
   for general information on how to get bridging to work.
 
+- Random number generation. Using the --rng option will provide a
+  /dev/hwrng in the guest that will read from the host's /dev/random.
+  Use this option in conjunction with rng-tools (see ../hw_random.txt)
+  to provide entropy to the guest kernel's /dev/random.
+
 There is a helpful mailing list at http://ozlabs.org/mailman/listinfo/lguest
 
 Good luck!
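Inside the guest, feeding the resulting /dev/hwrng into the kernel's
entropy pool is typically done with the rng-tools daemon; a hedged
sketch (daemon name and flag come from rng-tools, not from this
document):

	$ rngd -r /dev/hwrng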
diff --git a/Documentation/networking/Makefile b/Documentation/networking/Makefile
index 5aba7a3..24c308d 100644
--- a/Documentation/networking/Makefile
+++ b/Documentation/networking/Makefile
@@ -4,6 +4,8 @@
 # List of programs to build
 hostprogs-y := ifenslave
 
+HOSTCFLAGS_ifenslave.o += -I$(objtree)/usr/include
+
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
 
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 77f0cdd..18afcd8 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -1,4 +1,4 @@
-[state: 21-11-2010]
+[state: 27-01-2011]
 
 BATMAN-ADV
 ----------
@@ -67,15 +67,16 @@
 folder:
 
 #  ls  /sys/class/net/bat0/mesh/
-#  aggregated_ogms  bonding  fragmentation  orig_interval
-#  vis_mode
+#  aggregated_ogms  gw_bandwidth  hop_penalty
+#  bonding          gw_mode       orig_interval
+#  fragmentation    gw_sel_class  vis_mode
 
 
 There is a special folder for debugging informations:
 
 #  ls /sys/kernel/debug/batman_adv/bat0/
-#  originators  socket  transtable_global  transtable_local
-#  vis_data
+#  gateways     socket        transtable_global  vis_data
+#  originators  softif_neigh  transtable_local
 
 
 Some of the files contain all sort of status information  regard-
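The new gateway files are ordinary sysfs attributes; a minimal sketch
of inspecting and switching one of them (the value "client" is an
assumption about gw_mode, not taken from this document):

	$ cat /sys/class/net/bat0/mesh/gw_mode
	$ echo client > /sys/class/net/bat0/mesh/gw_mode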
@@ -230,9 +231,8 @@
 Please send us comments, experiences, questions, anything :)
 
 IRC:            #batman   on   irc.freenode.org
-Mailing-list:   b.a.t.m.a.n@b.a.t.m.a.n@lists.open-mesh.org
-                (optional   subscription   at
-                 https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
+Mailing-list:   b.a.t.m.a.n@open-mesh.org (optional subscription
+                at https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
 
 You can also contact the Authors:
 
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 5dc6387..25d2f41 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -49,7 +49,8 @@
 3.3	Configuring Bonding Manually with Ifenslave
 3.3.1		Configuring Multiple Bonds Manually
 3.4	Configuring Bonding Manually via Sysfs
-3.5	Overriding Configuration for Special Cases
+3.5	Configuration with Interfaces Support
+3.6	Overriding Configuration for Special Cases
 
 4. Querying Bonding Configuration
 4.1	Bonding Configuration
@@ -161,8 +162,8 @@
 default kernel source include directory.
 
 SECOND IMPORTANT NOTE:
-	If you plan to configure bonding using sysfs, you do not need
-to use ifenslave.
+	If you plan to configure bonding using sysfs or using the
+/etc/network/interfaces file, you do not need to use ifenslave.
 
 2. Bonding Driver Options
 =========================
@@ -779,22 +780,26 @@
 
 	You can configure bonding using either your distro's network
 initialization scripts, or manually using either ifenslave or the
-sysfs interface.  Distros generally use one of two packages for the
-network initialization scripts: initscripts or sysconfig.  Recent
-versions of these packages have support for bonding, while older
+sysfs interface.  Distros generally use one of three packages for the
+network initialization scripts: initscripts, sysconfig or interfaces.
+Recent versions of these packages have support for bonding, while older
 versions do not.
 
 	We will first describe the options for configuring bonding for
-distros using versions of initscripts and sysconfig with full or
-partial support for bonding, then provide information on enabling
+distros using versions of initscripts, sysconfig and interfaces with full
+or partial support for bonding, then provide information on enabling
 bonding without support from the network initialization scripts (i.e.,
 older versions of initscripts or sysconfig).
 
-	If you're unsure whether your distro uses sysconfig or
-initscripts, or don't know if it's new enough, have no fear.
+	If you're unsure whether your distro uses sysconfig,
+initscripts or interfaces, or don't know if it's new enough, have no fear.
 Determining this is fairly straightforward.
 
-	First, issue the command:
+	First, look for a file called interfaces in the /etc/network directory.
+If this file is present on your system, then your system uses interfaces. See
+Configuration with Interfaces Support.
+
+	Otherwise, issue the command:
 
 $ rpm -qf /sbin/ifup
 
@@ -1327,8 +1332,62 @@
 echo +eth2 > /sys/class/net/bond1/bonding/slaves
 echo +eth3 > /sys/class/net/bond1/bonding/slaves
 
-3.5 Overriding Configuration for Special Cases
+3.5 Configuration with Interfaces Support
+-----------------------------------------
+
+	This section applies to distros which use the /etc/network/interfaces
+file to describe network interface configuration, most notably Debian and its
+derivatives.
+
+	The ifup and ifdown commands on Debian don't support bonding out of
+the box. The ifenslave-2.6 package should be installed to provide bonding
+support.  Once installed, this package will provide bond-* options to be used
+in /etc/network/interfaces.
+
+	Note that the ifenslave-2.6 package will load the bonding module and use
+the ifenslave command when appropriate.
+
+Example Configurations
+----------------------
+
+In /etc/network/interfaces, the following stanza will configure bond0, in
+active-backup mode, with eth0 and eth1 as slaves.
+
+auto bond0
+iface bond0 inet dhcp
+	bond-slaves eth0 eth1
+	bond-mode active-backup
+	bond-miimon 100
+	bond-primary eth0 eth1
+
+If the above configuration doesn't work, you might have a system using
+upstart for system startup. This is most notably true for recent
+Ubuntu versions. The following stanza in /etc/network/interfaces will
+produce the same result on those systems.
+
+auto bond0
+iface bond0 inet dhcp
+	bond-slaves none
+	bond-mode active-backup
+	bond-miimon 100
+
+auto eth0
+iface eth0 inet manual
+	bond-master bond0
+	bond-primary eth0 eth1
+
+auto eth1
+iface eth1 inet manual
+	bond-master bond0
+	bond-primary eth0 eth1
+
+For a full list of supported bond-* options in /etc/network/interfaces and some
+more advanced examples tailored to your particular distro, see the files in
+/usr/share/doc/ifenslave-2.6.
+
+3.6 Overriding Configuration for Special Cases
 ----------------------------------------------
+
 When using the bonding driver, the physical port which transmits a frame is
 typically selected by the bonding driver, and is not relevant to the user or
 system administrator.  The output port is simply selected using the policies of
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index d99940d..d3d653a 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -187,7 +187,7 @@
 tcp_dsack - BOOLEAN
 	Allows TCP to send "duplicate" SACKs.
 
-tcp_ecn - BOOLEAN
+tcp_ecn - INTEGER
 	Enable Explicit Congestion Notification (ECN) in TCP. ECN is only
 	used when both ends of the TCP flow support it. It is useful to
 	avoid losses due to congestion (when the bottleneck router supports
@@ -280,6 +280,17 @@
 	more aggressively. Let me to remind again: each orphan eats
 	up to ~64K of unswappable memory.
 
+tcp_max_ssthresh - INTEGER
+	Limited Slow-Start for TCP with large congestion windows (cwnd) defined in
+	RFC3742. Limited slow-start is a mechanism to limit growth of the cwnd
+	in the region where cwnd is larger than tcp_max_ssthresh. TCP increases cwnd
+	by at most tcp_max_ssthresh segments, and by at least tcp_max_ssthresh/2
+	segments per RTT when the cwnd is above tcp_max_ssthresh.
+	If a TCP connection increases its cwnd to thousands (or tens of thousands) of
+	segments, and thousands of packets were being dropped during slow-start, you
+	can set tcp_max_ssthresh to improve performance for new TCP connections.
+	Default: 0 (off)
+
 tcp_max_syn_backlog - INTEGER
 	Maximal number of remembered connection requests, which are
 	still did not receive an acknowledgment from connecting client.
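As with the other entries in this file, tcp_max_ssthresh is exposed
under /proc/sys/net/ipv4; a minimal sketch of enabling it at runtime
(the value 100 is only an illustrative threshold):

	$ sysctl -w net.ipv4.tcp_max_ssthresh=100
	$ echo 100 > /proc/sys/net/ipv4/tcp_max_ssthresh   # equivalent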
diff --git a/Documentation/scheduler/sched-stats.txt b/Documentation/scheduler/sched-stats.txt
index 01e6940..1cd5d51 100644
--- a/Documentation/scheduler/sched-stats.txt
+++ b/Documentation/scheduler/sched-stats.txt
@@ -1,3 +1,7 @@
+Version 15 of schedstats dropped some sched_yield() counters:
+yld_exp_empty, yld_act_empty and yld_both_empty. Otherwise, it is
+identical to version 14.
+
 Version 14 of schedstats includes support for sched_domains, which hit the
 mainline kernel in 2.6.20 although it is identical to the stats from version
 12 which was in the kernel from 2.6.13-2.6.19 (version 13 never saw a kernel
@@ -28,32 +32,25 @@
 
 CPU statistics
 --------------
-cpu<N> 1 2 3 4 5 6 7 8 9 10 11 12
+cpu<N> 1 2 3 4 5 6 7 8 9
 
-NOTE: In the sched_yield() statistics, the active queue is considered empty
-    if it has only one process in it, since obviously the process calling
-    sched_yield() is that process.
-
-First four fields are sched_yield() statistics:
-     1) # of times both the active and the expired queue were empty
-     2) # of times just the active queue was empty
-     3) # of times just the expired queue was empty
-     4) # of times sched_yield() was called
+First field is a sched_yield() statistic:
+     1) # of times sched_yield() was called
 
 Next three are schedule() statistics:
-     5) # of times we switched to the expired queue and reused it
-     6) # of times schedule() was called
-     7) # of times schedule() left the processor idle
+     2) # of times we switched to the expired queue and reused it
+     3) # of times schedule() was called
+     4) # of times schedule() left the processor idle
 
 Next two are try_to_wake_up() statistics:
-     8) # of times try_to_wake_up() was called
-     9) # of times try_to_wake_up() was called to wake up the local cpu
+     5) # of times try_to_wake_up() was called
+     6) # of times try_to_wake_up() was called to wake up the local cpu
 
 Next three are statistics describing scheduling latency:
-    10) sum of all time spent running by tasks on this processor (in jiffies)
-    11) sum of all time spent waiting to run by tasks on this processor (in
+     7) sum of all time spent running by tasks on this processor (in jiffies)
+     8) sum of all time spent waiting to run by tasks on this processor (in
         jiffies)
-    12) # of timeslices run on this cpu
+     9) # of timeslices run on this cpu
 
 
 Domain statistics
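
To make the field numbering above concrete, a minimal sketch that reads the
version 15 per-CPU line from /proc/schedstat and prints counter 1 (the
sched_yield() call count); the nine-field layout assumed here is the one
listed above:

/* sketch: parse the "cpu<N> 1 .. 9" line documented above */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/schedstat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		unsigned int cpu;
		unsigned long long v[9];

		if (sscanf(line, "cpu%u %llu %llu %llu %llu %llu %llu %llu %llu %llu",
			   &cpu, &v[0], &v[1], &v[2], &v[3], &v[4],
			   &v[5], &v[6], &v[7], &v[8]) == 10)
			printf("cpu%u: sched_yield() called %llu times\n", cpu, v[0]);
	}
	fclose(f);
	return 0;
}
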
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 16ae430..0caf77e 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -296,6 +296,7 @@
 =============
   laptop	Basic Laptop config (default)
   hp-laptop	HP laptops, e.g. G60
+  asus		Asus K52JU, Lenovo G560
   dell-laptop	Dell laptops
   dell-vostro	Dell Vostro
   olpc-xo-1_5	OLPC XO 1.5
diff --git a/Documentation/sound/alsa/soc/codec.txt b/Documentation/sound/alsa/soc/codec.txt
index 37ba3a7..bce23a4 100644
--- a/Documentation/sound/alsa/soc/codec.txt
+++ b/Documentation/sound/alsa/soc/codec.txt
@@ -27,42 +27,38 @@
 
 1 - Codec DAI and PCM configuration
 -----------------------------------
-Each codec driver must have a struct snd_soc_codec_dai to define its DAI and
+Each codec driver must have a struct snd_soc_dai_driver to define its DAI and
 PCM capabilities and operations. This struct is exported so that it can be
 registered with the core by your machine driver.
 
 e.g.
 
-struct snd_soc_codec_dai wm8731_dai = {
-	.name = "WM8731",
-	/* playback capabilities */
+static struct snd_soc_dai_ops wm8731_dai_ops = {
+	.prepare	= wm8731_pcm_prepare,
+	.hw_params	= wm8731_hw_params,
+	.shutdown	= wm8731_shutdown,
+	.digital_mute	= wm8731_mute,
+	.set_sysclk	= wm8731_set_dai_sysclk,
+	.set_fmt	= wm8731_set_dai_fmt,
+};
+
+struct snd_soc_dai_driver wm8731_dai = {
+	.name = "wm8731-hifi",
 	.playback = {
 		.stream_name = "Playback",
 		.channels_min = 1,
 		.channels_max = 2,
 		.rates = WM8731_RATES,
 		.formats = WM8731_FORMATS,},
-	/* capture capabilities */
 	.capture = {
 		.stream_name = "Capture",
 		.channels_min = 1,
 		.channels_max = 2,
 		.rates = WM8731_RATES,
 		.formats = WM8731_FORMATS,},
-	/* pcm operations - see section 4 below */
-	.ops = {
-		.prepare = wm8731_pcm_prepare,
-		.hw_params = wm8731_hw_params,
-		.shutdown = wm8731_shutdown,
-	},
-	/* DAI operations - see DAI.txt */
-	.dai_ops = {
-		.digital_mute = wm8731_mute,
-		.set_sysclk = wm8731_set_dai_sysclk,
-		.set_fmt = wm8731_set_dai_fmt,
-	}
+	.ops = &wm8731_dai_ops,
+	.symmetric_rates = 1,
 };
-EXPORT_SYMBOL_GPL(wm8731_dai);
 
 
 2 - Codec control IO
@@ -186,13 +182,14 @@
 
 i.e.
 
-static int wm8974_mute(struct snd_soc_codec *codec,
-	struct snd_soc_codec_dai *dai, int mute)
+static int wm8974_mute(struct snd_soc_dai *dai, int mute)
 {
-	u16 mute_reg = wm8974_read_reg_cache(codec, WM8974_DAC) & 0xffbf;
-	if(mute)
-		wm8974_write(codec, WM8974_DAC, mute_reg | 0x40);
+	struct snd_soc_codec *codec = dai->codec;
+	u16 mute_reg = snd_soc_read(codec, WM8974_DAC) & 0xffbf;
+
+	if (mute)
+		snd_soc_write(codec, WM8974_DAC, mute_reg | 0x40);
 	else
-		wm8974_write(codec, WM8974_DAC, mute_reg);
+		snd_soc_write(codec, WM8974_DAC, mute_reg);
 	return 0;
 }
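
To illustrate the "registered with the core" step from section 1 above, a
hedged sketch of an I2C probe handing the snd_soc_dai_driver to ASoC. The
soc_codec_dev_wm8731 codec driver and the probe wiring are assumptions made
for the sketch, not an excerpt from the real wm8731 sources:

/* sketch only: register one codec driver plus the DAI driver from section 1 */
static int __devinit wm8731_i2c_probe(struct i2c_client *i2c,
				      const struct i2c_device_id *id)
{
	return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8731,
				      &wm8731_dai, 1);
}
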
diff --git a/Documentation/sound/alsa/soc/machine.txt b/Documentation/sound/alsa/soc/machine.txt
index 2524c75..3e2ec9c 100644
--- a/Documentation/sound/alsa/soc/machine.txt
+++ b/Documentation/sound/alsa/soc/machine.txt
@@ -12,6 +12,8 @@
 struct snd_soc_card {
 	char *name;
 
+	...
+
 	int (*probe)(struct platform_device *pdev);
 	int (*remove)(struct platform_device *pdev);
 
@@ -22,12 +24,13 @@
 	int (*resume_pre)(struct platform_device *pdev);
 	int (*resume_post)(struct platform_device *pdev);
 
-	/* machine stream operations */
-	struct snd_soc_ops *ops;
+	...
 
 	/* CPU <--> Codec DAI links  */
 	struct snd_soc_dai_link *dai_link;
 	int num_links;
+
+	...
 };
 
 probe()/remove()
@@ -42,11 +45,6 @@
 and DMA is suspended and resumed. Optional.
 
 
-Machine operations
-------------------
-The machine specific audio operations can be set here. Again this is optional.
-
-
 Machine DAI Configuration
 -------------------------
 The machine DAI configuration glues all the codec and CPU DAIs together. It can
@@ -61,8 +59,10 @@
 static struct snd_soc_dai_link corgi_dai = {
 	.name = "WM8731",
 	.stream_name = "WM8731",
-	.cpu_dai = &pxa_i2s_dai,
-	.codec_dai = &wm8731_dai,
+	.cpu_dai_name = "pxa-i2s-dai",
+	.codec_dai_name = "wm8731-hifi",
+	.platform_name = "pxa-pcm-audio",
+	.codec_name = "wm8731-codec.0-001a",
 	.init = corgi_wm8731_init,
 	.ops = &corgi_ops,
 };
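
Registration of the card itself now happens through the "soc-audio" platform
device rather than the old snd_soc_device glue. A hedged sketch, assuming a
card named snd_soc_corgi that points at the dai_link above (the init function
name is illustrative):

/* sketch only: register the card via the "soc-audio" platform device */
static struct platform_device *corgi_snd_device;

static int __init corgi_audio_init(void)
{
	int ret;

	corgi_snd_device = platform_device_alloc("soc-audio", -1);
	if (!corgi_snd_device)
		return -ENOMEM;

	platform_set_drvdata(corgi_snd_device, &snd_soc_corgi);
	ret = platform_device_add(corgi_snd_device);
	if (ret)
		platform_device_put(corgi_snd_device);
	return ret;
}
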
@@ -77,26 +77,6 @@
 };
 
 
-Machine Audio Subsystem
------------------------
-
-The machine soc device glues the platform, machine and codec driver together.
-Private data can also be set here. e.g.
-
-/* corgi audio private data */
-static struct wm8731_setup_data corgi_wm8731_setup = {
-	.i2c_address = 0x1b,
-};
-
-/* corgi audio subsystem */
-static struct snd_soc_device corgi_snd_devdata = {
-	.machine = &snd_soc_corgi,
-	.platform = &pxa2xx_soc_platform,
-	.codec_dev = &soc_codec_dev_wm8731,
-	.codec_data = &corgi_wm8731_setup,
-};
-
-
 Machine Power Map
 -----------------
 
diff --git a/Documentation/sound/alsa/soc/platform.txt b/Documentation/sound/alsa/soc/platform.txt
index 06d8359..d57efad 100644
--- a/Documentation/sound/alsa/soc/platform.txt
+++ b/Documentation/sound/alsa/soc/platform.txt
@@ -20,9 +20,10 @@
 	int (*trigger)(struct snd_pcm_substream *, int);
 };
 
-The platform driver exports its DMA functionality via struct snd_soc_platform:-
+The platform driver exports its DMA functionality via struct
+snd_soc_platform_driver:-
 
-struct snd_soc_platform {
+struct snd_soc_platform_driver {
 	char *name;
 
 	int (*probe)(struct platform_device *pdev);
@@ -34,6 +35,13 @@
 	int (*pcm_new)(struct snd_card *, struct snd_soc_codec_dai *, struct snd_pcm *);
 	void (*pcm_free)(struct snd_pcm *);
 
+	/*
+	 * For platform caused delay reporting.
+	 * Optional.
+	 */
+	snd_pcm_sframes_t (*delay)(struct snd_pcm_substream *,
+		struct snd_soc_dai *);
+
 	/* platform stream ops */
 	struct snd_pcm_ops *pcm_ops;
 };
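
A short sketch of the optional delay() callback added above; the
my_dma_fifo_frames() helper is a made-up placeholder for however a real
platform measures the frames still queued in its DMA/FIFO path:

/* sketch only: report platform-side delay in frames */
static snd_pcm_sframes_t my_platform_delay(struct snd_pcm_substream *substream,
					   struct snd_soc_dai *dai)
{
	return my_dma_fifo_frames(substream);	/* hypothetical helper */
}
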
diff --git a/Documentation/video4linux/v4l2-controls.txt b/Documentation/video4linux/v4l2-controls.txt
index 8773778..881e7f4 100644
--- a/Documentation/video4linux/v4l2-controls.txt
+++ b/Documentation/video4linux/v4l2-controls.txt
@@ -285,6 +285,9 @@
 The 'new value' union is not used in g_volatile_ctrl. In general controls
 that need to implement g_volatile_ctrl are read-only controls.
 
+Note that if one or more controls in a control cluster are marked as volatile,
+then all the controls in the cluster are seen as volatile.
+
 To mark a control as volatile you have to set the is_volatile flag:
 
 	ctrl = v4l2_ctrl_new_std(&sd->ctrl_handler, ...);
@@ -462,6 +465,15 @@
 Obviously, all controls in the cluster array must be initialized to either
 a valid control or to NULL.
 
+In rare cases you might want to know which controls of a cluster were actually
+set explicitly by the user. For this you can check the 'is_new' flag of each
+control. For example, in the case of a volume/mute cluster the 'is_new' flag
+of the mute control would be set if the user called VIDIOC_S_CTRL for the
+mute control only. If the user calls VIDIOC_S_EXT_CTRLS for both the mute and
+volume controls, then the 'is_new' flag would be 1 for both controls.
+
+The 'is_new' flag is always 1 when s_ctrl is called from v4l2_ctrl_handler_setup().
+
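
A hedged sketch of how a driver's s_ctrl handler might use 'is_new' for such a
volume/mute cluster; the foo_* state and register helpers are illustrative,
not taken from a real driver:

/* sketch only: cluster master handling, checking which values are new */
static int foo_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct foo_state *state = container_of(ctrl->handler,
					       struct foo_state, hdl);

	switch (ctrl->id) {
	case V4L2_CID_AUDIO_VOLUME:
		if (state->mute->is_new)
			foo_write_mute(state, state->mute->val);
		if (state->volume->is_new)
			foo_write_volume(state, state->volume->val);
		return 0;
	}
	return -EINVAL;
}
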
 
 VIDIOC_LOG_STATUS Support
 =========================
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt
index 996a27d..01c513f 100644
--- a/Documentation/workqueue.txt
+++ b/Documentation/workqueue.txt
@@ -190,9 +190,9 @@
 	* Long running CPU intensive workloads which can be better
 	  managed by the system scheduler.
 
-  WQ_FREEZEABLE
+  WQ_FREEZABLE
 
-	A freezeable wq participates in the freeze phase of the system
+	A freezable wq participates in the freeze phase of the system
 	suspend operations.  Work items on the wq are drained and no
 	new work item starts execution until thawed.
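
A freezable workqueue is requested when the queue is allocated; a minimal
sketch (the queue name and max_active value of 0 are illustrative):

/* sketch: a workqueue that is drained during suspend and thawed on resume */
struct workqueue_struct *wq;

wq = alloc_workqueue("mydrv", WQ_FREEZABLE, 0);
if (!wq)
	return -ENOMEM;
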
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 95482e9..0d83e58 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -162,7 +162,7 @@
 W:	http://serial.sourceforge.net
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
-F:	drivers/serial/8250*
+F:	drivers/tty/serial/8250*
 F:	include/linux/serial_8250.h
 
 8390 NETWORK DRIVERS [WD80x3/SMC-ELITE, SMC-ULTRA, NE2000, 3C503, etc.]
@@ -624,11 +624,15 @@
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 
-ARM/ATMEL AT91RM9200 ARM ARCHITECTURE
+ARM/ATMEL AT91RM9200 AND AT91SAM ARM ARCHITECTURES
 M:	Andrew Victor <linux@maxim.org.za>
+M:	Nicolas Ferre <nicolas.ferre@atmel.com>
+M:	Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:	http://maxim.org.za/at91_26.html
-S:	Maintained
+W:	http://www.linux4sam.org
+S:	Supported
+F:	arch/arm/mach-at91/
 
 ARM/BCMRING ARM ARCHITECTURE
 M:	Jiandong Zheng <jdzheng@broadcom.com>
@@ -888,8 +892,8 @@
 F:	drivers/video/msm/
 F:	drivers/mmc/host/msm_sdcc.c
 F:	drivers/mmc/host/msm_sdcc.h
-F:	drivers/serial/msm_serial.h
-F:	drivers/serial/msm_serial.c
+F:	drivers/tty/serial/msm_serial.h
+F:	drivers/tty/serial/msm_serial.c
 T:	git git://codeaurora.org/quic/kernel/davidb/linux-msm.git
 S:	Maintained
 
@@ -974,6 +978,8 @@
 F:	arch/arm/plat-samsung/
 F:	arch/arm/plat-s3c24xx/
 F:	arch/arm/plat-s5p/
+F:	drivers/*/*s3c2410*
+F:	drivers/*/*/*s3c2410*
 
 ARM/S3C2410 ARM ARCHITECTURE
 M:	Ben Dooks <ben-linux@fluff.org>
@@ -1256,7 +1262,7 @@
 ATMEL AT91 / AT32 SERIAL DRIVER
 M:	Nicolas Ferre <nicolas.ferre@atmel.com>
 S:	Supported
-F:	drivers/serial/atmel_serial.c
+F:	drivers/tty/serial/atmel_serial.c
 
 ATMEL LCDFB DRIVER
 M:	Nicolas Ferre <nicolas.ferre@atmel.com>
@@ -1412,7 +1418,7 @@
 L:	uclinux-dist-devel@blackfin.uclinux.org
 W:	http://blackfin.uclinux.org
 S:	Supported
-F:	drivers/serial/bfin_5xx.c
+F:	drivers/tty/serial/bfin_5xx.c
 
 BLACKFIN WATCHDOG DRIVER
 M:	Mike Frysinger <vapier.adi@gmail.com>
@@ -1687,6 +1693,7 @@
 F:	scripts/checkpatch.pl
 
 CISCO VIC ETHERNET NIC DRIVER
+M:	Christian Benvenuti <benve@cisco.com>
 M:	Vasanthy Kolluri <vkolluri@cisco.com>
 M:	Roopa Prabhu <roprabhu@cisco.com>
 M:	David Wang <dwang2@cisco.com>
@@ -1877,7 +1884,7 @@
 W:	http://developer.axis.com
 S:	Maintained
 F:	arch/cris/
-F:	drivers/serial/crisv10.*
+F:	drivers/tty/serial/crisv10.*
 
 CRYPTO API
 M:	Herbert Xu <herbert@gondor.apana.org.au>
@@ -2120,6 +2127,7 @@
 F:	fs/dlm/
 
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
+M:	Vinod Koul <vinod.koul@intel.com>
 M:	Dan Williams <dan.j.williams@intel.com>
 S:	Supported
 F:	drivers/dma/
@@ -2216,7 +2224,7 @@
 DZ DECSTATION DZ11 SERIAL DRIVER
 M:	"Maciej W. Rozycki" <macro@linux-mips.org>
 S:	Maintained
-F:	drivers/serial/dz.*
+F:	drivers/tty/serial/dz.*
 
 EATA-DMA SCSI DRIVER
 M:	Michael Neuffer <mike@i-Connect.Net>
@@ -2643,7 +2651,7 @@
 M:	Timur Tabi <timur@freescale.com>
 L:	linuxppc-dev@lists.ozlabs.org
 S:	Supported
-F:	drivers/serial/ucc_uart.c
+F:	drivers/tty/serial/ucc_uart.c
 
 FREESCALE SOC SOUND DRIVERS
 M:	Timur Tabi <timur@freescale.com>
@@ -2768,6 +2776,15 @@
 F:	drivers/isdn/gigaset/
 F:	include/linux/gigaset_dev.h
 
+GPIO SUBSYSTEM
+M:	Grant Likely <grant.likely@secretlab.ca>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+T:	git git://git.secretlab.ca/git/linux-2.6.git
+F:	Documentation/gpio/gpio.txt
+F:	drivers/gpio/
+F:	include/linux/gpio*
+
 GRETH 10/100/1G Ethernet MAC device driver
 M:	Kristoffer Glembo <kristoffer@gaisler.com>
 L:	netdev@vger.kernel.org
@@ -3135,6 +3152,12 @@
 F:	net/ieee802154/
 F:	drivers/ieee802154/
 
+IKANOS/ADI EAGLE ADSL USB DRIVER
+M:	Matthieu Castet <castet.matthieu@free.fr>
+M:	Stanislaw Gruszka <stf_xl@wp.pl>
+S:	Maintained
+F:	drivers/usb/atm/ueagle-atm.c
+
 INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
 M:	Mimi Zohar <zohar@us.ibm.com>
 S:	Supported
@@ -3146,7 +3169,7 @@
 F:	drivers/video/imsttfb.c
 
 INFINIBAND SUBSYSTEM
-M:	Roland Dreier <rolandd@cisco.com>
+M:	Roland Dreier <roland@kernel.org>
 M:	Sean Hefty <sean.hefty@intel.com>
 M:	Hal Rosenstock <hal.rosenstock@gmail.com>
 L:	linux-rdma@vger.kernel.org
@@ -3349,7 +3372,7 @@
 M:	Pat Gefre <pfg@sgi.com>
 L:	linux-serial@vger.kernel.org
 S:	Maintained
-F:	drivers/serial/ioc3_serial.c
+F:	drivers/tty/serial/ioc3_serial.c
 
 IP MASQUERADING
 M:	Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
@@ -3526,7 +3549,7 @@
 M:	Breno Leitao <leitao@linux.vnet.ibm.com>
 L:	linux-serial@vger.kernel.org
 S:	Maintained
-F:	drivers/serial/jsm/
+F:	drivers/tty/serial/jsm/
 
 K10TEMP HARDWARE MONITORING DRIVER
 M:	Clemens Ladisch <clemens@ladisch.de>
@@ -3669,6 +3692,28 @@
 F:	include/keys/
 F:	security/keys/
 
+KEYS-TRUSTED
+M:	David Safford <safford@watson.ibm.com>
+M:	Mimi Zohar <zohar@us.ibm.com>
+L:	linux-security-module@vger.kernel.org
+L:	keyrings@linux-nfs.org
+S:	Supported
+F:	Documentation/keys-trusted-encrypted.txt
+F:	include/keys/trusted-type.h
+F:	security/keys/trusted.c
+F:	security/keys/trusted.h
+
+KEYS-ENCRYPTED
+M:	Mimi Zohar <zohar@us.ibm.com>
+M:	David Safford <safford@watson.ibm.com>
+L:	linux-security-module@vger.kernel.org
+L:	keyrings@linux-nfs.org
+S:	Supported
+F:	Documentation/keys-trusted-encrypted.txt
+F:	include/keys/encrypted-type.h
+F:	security/keys/encrypted.c
+F:	security/keys/encrypted.h
+
 KGDB / KDB /debug_core
 M:	Jason Wessel <jason.wessel@windriver.com>
 W:	http://kgdb.wiki.kernel.org/
@@ -3676,7 +3721,7 @@
 S:	Maintained
 F:	Documentation/DocBook/kgdb.tmpl
 F:	drivers/misc/kgdbts.c
-F:	drivers/serial/kgdboc.c
+F:	drivers/tty/serial/kgdboc.c
 F:	include/linux/kdb.h
 F:	include/linux/kgdb.h
 F:	kernel/debug/
@@ -4557,7 +4602,7 @@
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE
 M:	Grant Likely <grant.likely@secretlab.ca>
-L:	devicetree-discuss@lists.ozlabs.org
+L:	devicetree-discuss@lists.ozlabs.org (moderated for non-subscribers)
 W:	http://fdt.secretlab.ca
 T:	git git://git.secretlab.ca/git/linux-2.6.git
 S:	Maintained
@@ -5518,12 +5563,11 @@
 F:	drivers/scsi/be2iscsi/
 
 SERVER ENGINES 10Gbps NIC - BladeEngine 2 DRIVER
-M:	Sathya Perla <sathyap@serverengines.com>
-M:	Subbu Seetharaman <subbus@serverengines.com>
-M:	Sarveshwar Bandi <sarveshwarb@serverengines.com>
-M:	Ajit Khaparde <ajitk@serverengines.com>
+M:	Sathya Perla <sathya.perla@emulex.com>
+M:	Subbu Seetharaman <subbu.seetharaman@emulex.com>
+M:	Ajit Khaparde <ajit.khaparde@emulex.com>
 L:	netdev@vger.kernel.org
-W:	http://www.serverengines.com
+W:	http://www.emulex.com
 S:	Supported
 F:	drivers/net/benet/
 
@@ -5545,7 +5589,7 @@
 L:	linux-ia64@vger.kernel.org
 S:	Supported
 F:	Documentation/ia64/serial.txt
-F:	drivers/serial/ioc?_serial.c
+F:	drivers/tty/serial/ioc?_serial.c
 F:	include/linux/ioc?.h
 
 SGI VISUAL WORKSTATION 320 AND 540
@@ -5567,7 +5611,7 @@
 S:	Maintained
 F:	Documentation/arm/Sharp-LH/ADC-LH7-Touchscreen
 F:	arch/arm/mach-lh7a40x/
-F:	drivers/serial/serial_lh7a40x.c
+F:	drivers/tty/serial/serial_lh7a40x.c
 F:	drivers/usb/gadget/lh7a40*
 F:	drivers/usb/host/ohci-lh7a40*
 
@@ -5583,18 +5627,20 @@
 
 SIMTEC EB110ATX (Chalice CATS)
 P:	Ben Dooks
-M:	Vincent Sanders <support@simtec.co.uk>
+P:	Vincent Sanders <vince@simtec.co.uk>
+M:	Simtec Linux Team <linux@simtec.co.uk>
 W:	http://www.simtec.co.uk/products/EB110ATX/
 S:	Supported
 
 SIMTEC EB2410ITX (BAST)
 P:	Ben Dooks
-M:	Vincent Sanders <support@simtec.co.uk>
+P:	Vincent Sanders <vince@simtec.co.uk>
+M:	Simtec Linux Team <linux@simtec.co.uk>
 W:	http://www.simtec.co.uk/products/EB2410ITX/
 S:	Supported
-F:	arch/arm/mach-s3c2410/
-F:	drivers/*/*s3c2410*
-F:	drivers/*/*/*s3c2410*
+F:	arch/arm/mach-s3c2410/mach-bast.c
+F:	arch/arm/mach-s3c2410/bast-ide.c
+F:	arch/arm/mach-s3c2410/bast-irq.c
 
 TI DAVINCI MACHINE SUPPORT
 M:	Kevin Hilman <khilman@deeprootsystems.com>
@@ -5787,14 +5833,14 @@
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6.git
 S:	Maintained
-F:	drivers/serial/suncore.c
-F:	drivers/serial/suncore.h
-F:	drivers/serial/sunhv.c
-F:	drivers/serial/sunsab.c
-F:	drivers/serial/sunsab.h
-F:	drivers/serial/sunsu.c
-F:	drivers/serial/sunzilog.c
-F:	drivers/serial/sunzilog.h
+F:	drivers/tty/serial/suncore.c
+F:	drivers/tty/serial/suncore.h
+F:	drivers/tty/serial/sunhv.c
+F:	drivers/tty/serial/sunsab.c
+F:	drivers/tty/serial/sunsab.h
+F:	drivers/tty/serial/sunsu.c
+F:	drivers/tty/serial/sunzilog.c
+F:	drivers/tty/serial/sunzilog.h
 
 SPEAR PLATFORM SUPPORT
 M:	Viresh Kumar <viresh.kumar@st.com>
@@ -6124,8 +6170,8 @@
 M:	Greg Kroah-Hartman <gregkh@suse.de>
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
-F:	drivers/char/tty_*
-F:	drivers/serial/serial_core.c
+F:	drivers/tty/*
+F:	drivers/tty/serial/serial_core.c
 F:	include/linux/serial_core.h
 F:	include/linux/serial.h
 F:	include/linux/tty.h
@@ -6569,6 +6615,16 @@
 F:	drivers/char/virtio_console.c
 F:	include/linux/virtio_console.h
 
+VIRTIO CORE, NET AND BLOCK DRIVERS
+M:	Rusty Russell <rusty@rustcorp.com.au>
+M:	"Michael S. Tsirkin" <mst@redhat.com>
+L:	virtualization@lists.linux-foundation.org
+S:	Maintained
+F:	drivers/virtio/
+F:	drivers/net/virtio_net.c
+F:	drivers/block/virtio_blk.c
+F:	include/linux/virtio_*.h
+
 VIRTIO HOST (VHOST)
 M:	"Michael S. Tsirkin" <mst@redhat.com>
 L:	kvm@vger.kernel.org
@@ -6870,7 +6926,7 @@
 M:	Peter Korsgaard <jacmet@sunsite.dk>
 L:	linux-serial@vger.kernel.org
 S:	Maintained
-F:	drivers/serial/uartlite.c
+F:	drivers/tty/serial/uartlite.c
 
 YAM DRIVER FOR AX.25
 M:	Jean-Paul Roubelat <jpr@f6fbb.org>
@@ -6916,7 +6972,7 @@
 ZS DECSTATION Z85C30 SERIAL DRIVER
 M:	"Maciej W. Rozycki" <macro@linux-mips.org>
 S:	Maintained
-F:	drivers/serial/zs.*
+F:	drivers/tty/serial/zs.*
 
 GRE DEMULTIPLEXER DRIVER
 M:	Dmitry Kozlov <xeb@mail.ru>
diff --git a/Makefile b/Makefile
index abb49bf..5e40aa2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 38
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc5
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index fc95ee1..47f63d4 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -8,6 +8,9 @@
 	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select HAVE_DMA_ATTRS
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_IRQ_PROBE
+	select AUTO_IRQ_AFFINITY if SMP
 	help
 	  The Alpha is a 64-bit general-purpose processor designed and
 	  marketed by the Digital Equipment Corporation of blessed memory,
@@ -68,22 +71,6 @@
 	bool
 	default n
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
-config GENERIC_HARDIRQS
-	bool
-	default y
-
-config GENERIC_IRQ_PROBE
-	bool
-	default y
-
-config AUTO_IRQ_AFFINITY
-	bool
-	depends on SMP
-	default y
-
 source "init/Kconfig"
 source "kernel/Kconfig.freezer"
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5cff165..26d45e5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1391,7 +1391,7 @@
 
 config OABI_COMPAT
 	bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
-	depends on AEABI && EXPERIMENTAL
+	depends on AEABI && EXPERIMENTAL && !THUMB2_KERNEL
 	default y
 	help
 	  This option preserves the old syscall interface along with the
diff --git a/arch/arm/configs/ag5evm_defconfig b/arch/arm/configs/ag5evm_defconfig
index 2b9cf56..212ead3 100644
--- a/arch/arm/configs/ag5evm_defconfig
+++ b/arch/arm/configs/ag5evm_defconfig
@@ -10,7 +10,7 @@
 # CONFIG_PID_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
diff --git a/arch/arm/configs/am200epdkit_defconfig b/arch/arm/configs/am200epdkit_defconfig
index 5536c48..f0dea52 100644
--- a/arch/arm/configs/am200epdkit_defconfig
+++ b/arch/arm/configs/am200epdkit_defconfig
@@ -3,7 +3,7 @@
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_EPOLL is not set
 # CONFIG_SHMEM is not set
diff --git a/arch/arm/configs/at572d940hfek_defconfig b/arch/arm/configs/at572d940hfek_defconfig
index 695e32d..1b1158a 100644
--- a/arch/arm/configs/at572d940hfek_defconfig
+++ b/arch/arm/configs/at572d940hfek_defconfig
@@ -17,7 +17,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
diff --git a/arch/arm/configs/badge4_defconfig b/arch/arm/configs/badge4_defconfig
index 3a1ad15..5b54abb 100644
--- a/arch/arm/configs/badge4_defconfig
+++ b/arch/arm/configs/badge4_defconfig
@@ -1,6 +1,6 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODVERSIONS=y
 CONFIG_ARCH_SA1100=y
diff --git a/arch/arm/configs/bcmring_defconfig b/arch/arm/configs/bcmring_defconfig
index 75984cd..795374d 100644
--- a/arch/arm/configs/bcmring_defconfig
+++ b/arch/arm/configs/bcmring_defconfig
@@ -2,7 +2,7 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 # CONFIG_HOTPLUG is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/arm/configs/cm_x2xx_defconfig b/arch/arm/configs/cm_x2xx_defconfig
index dcfbcf3..a93ff8d 100644
--- a/arch/arm/configs/cm_x2xx_defconfig
+++ b/arch/arm/configs/cm_x2xx_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
diff --git a/arch/arm/configs/colibri_pxa270_defconfig b/arch/arm/configs/colibri_pxa270_defconfig
index f52c64e..2ef2c5e 100644
--- a/arch/arm/configs/colibri_pxa270_defconfig
+++ b/arch/arm/configs/colibri_pxa270_defconfig
@@ -8,7 +8,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/collie_defconfig b/arch/arm/configs/collie_defconfig
index 310f9a6..6c56ad0 100644
--- a/arch/arm/configs/collie_defconfig
+++ b/arch/arm/configs/collie_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_BASE_FULL is not set
 # CONFIG_EPOLL is not set
 CONFIG_SLOB=y
diff --git a/arch/arm/configs/corgi_defconfig b/arch/arm/configs/corgi_defconfig
index 4a1fa81..e53c475 100644
--- a/arch/arm/configs/corgi_defconfig
+++ b/arch/arm/configs/corgi_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/da8xx_omapl_defconfig b/arch/arm/configs/da8xx_omapl_defconfig
index cdc40c4..88ccde0 100644
--- a/arch/arm/configs/da8xx_omapl_defconfig
+++ b/arch/arm/configs/da8xx_omapl_defconfig
@@ -6,7 +6,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 2519cc5..889922a 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -6,7 +6,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig
index 9359e1b..54bf5ee 100644
--- a/arch/arm/configs/dove_defconfig
+++ b/arch/arm/configs/dove_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/ebsa110_defconfig b/arch/arm/configs/ebsa110_defconfig
index c319418..14559db 100644
--- a/arch/arm/configs/ebsa110_defconfig
+++ b/arch/arm/configs/ebsa110_defconfig
@@ -2,7 +2,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_ARCH_EBSA110=y
 CONFIG_PCCARD=m
diff --git a/arch/arm/configs/edb7211_defconfig b/arch/arm/configs/edb7211_defconfig
index 7b62be1..d52ded35 100644
--- a/arch/arm/configs/edb7211_defconfig
+++ b/arch/arm/configs/edb7211_defconfig
@@ -2,7 +2,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 CONFIG_ARCH_CLPS711X=y
 CONFIG_ARCH_EDB7211=y
diff --git a/arch/arm/configs/em_x270_defconfig b/arch/arm/configs/em_x270_defconfig
index d7db34f..60a21e0 100644
--- a/arch/arm/configs/em_x270_defconfig
+++ b/arch/arm/configs/em_x270_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig
index 6d6689c..8e97b2f 100644
--- a/arch/arm/configs/ep93xx_defconfig
+++ b/arch/arm/configs/ep93xx_defconfig
@@ -4,7 +4,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/eseries_pxa_defconfig b/arch/arm/configs/eseries_pxa_defconfig
index 1691dea..d68ac67 100644
--- a/arch/arm/configs/eseries_pxa_defconfig
+++ b/arch/arm/configs/eseries_pxa_defconfig
@@ -2,7 +2,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index c4eeb6d..227a477 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -7,7 +7,7 @@
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_RD_BZIP2=y
 CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/footbridge_defconfig b/arch/arm/configs/footbridge_defconfig
index 4f925ea..038518a 100644
--- a/arch/arm/configs/footbridge_defconfig
+++ b/arch/arm/configs/footbridge_defconfig
@@ -3,7 +3,7 @@
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 CONFIG_MODULES=y
 CONFIG_ARCH_FOOTBRIDGE=y
diff --git a/arch/arm/configs/fortunet_defconfig b/arch/arm/configs/fortunet_defconfig
index e11c7ea..840fced 100644
--- a/arch/arm/configs/fortunet_defconfig
+++ b/arch/arm/configs/fortunet_defconfig
@@ -2,7 +2,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 CONFIG_ARCH_CLPS711X=y
 CONFIG_ARCH_FORTUNET=y
diff --git a/arch/arm/configs/h5000_defconfig b/arch/arm/configs/h5000_defconfig
index ac336f1..37903e3 100644
--- a/arch/arm/configs/h5000_defconfig
+++ b/arch/arm/configs/h5000_defconfig
@@ -4,7 +4,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index ade55c8..176ec22 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -6,7 +6,7 @@
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_RD_BZIP2=y
 CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/ixp2000_defconfig b/arch/arm/configs/ixp2000_defconfig
index 9083246..8405ade 100644
--- a/arch/arm/configs/ixp2000_defconfig
+++ b/arch/arm/configs/ixp2000_defconfig
@@ -3,7 +3,7 @@
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/ixp23xx_defconfig b/arch/arm/configs/ixp23xx_defconfig
index 7fc056a..6887176 100644
--- a/arch/arm/configs/ixp23xx_defconfig
+++ b/arch/arm/configs/ixp23xx_defconfig
@@ -3,7 +3,7 @@
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/ixp4xx_defconfig b/arch/arm/configs/ixp4xx_defconfig
index 5c50239..063e2ab 100644
--- a/arch/arm/configs/ixp4xx_defconfig
+++ b/arch/arm/configs/ixp4xx_defconfig
@@ -3,7 +3,7 @@
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODVERSIONS=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/arm/configs/loki_defconfig b/arch/arm/configs/loki_defconfig
index e1eaff7..1ba752b 100644
--- a/arch/arm/configs/loki_defconfig
+++ b/arch/arm/configs/loki_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/lpd7a400_defconfig b/arch/arm/configs/lpd7a400_defconfig
index 20caaab..5a48f17 100644
--- a/arch/arm/configs/lpd7a400_defconfig
+++ b/arch/arm/configs/lpd7a400_defconfig
@@ -3,7 +3,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_IKCONFIG=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 # CONFIG_EPOLL is not set
 # CONFIG_IOSCHED_DEADLINE is not set
diff --git a/arch/arm/configs/lpd7a404_defconfig b/arch/arm/configs/lpd7a404_defconfig
index 1efcce9..22d0631 100644
--- a/arch/arm/configs/lpd7a404_defconfig
+++ b/arch/arm/configs/lpd7a404_defconfig
@@ -3,7 +3,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_IKCONFIG=y
 CONFIG_LOG_BUF_SHIFT=16
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 # CONFIG_EPOLL is not set
 CONFIG_SLAB=y
diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig
index af805e8..a88e64d 100644
--- a/arch/arm/configs/magician_defconfig
+++ b/arch/arm/configs/magician_defconfig
@@ -4,7 +4,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/mv78xx0_defconfig b/arch/arm/configs/mv78xx0_defconfig
index b0d0824..7305ebd 100644
--- a/arch/arm/configs/mv78xx0_defconfig
+++ b/arch/arm/configs/mv78xx0_defconfig
@@ -2,7 +2,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_SLUB_DEBUG is not set
 CONFIG_PROFILING=y
diff --git a/arch/arm/configs/mx1_defconfig b/arch/arm/configs/mx1_defconfig
index 2f38d97..b39b5ce 100644
--- a/arch/arm/configs/mx1_defconfig
+++ b/arch/arm/configs/mx1_defconfig
@@ -4,7 +4,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/mx21_defconfig b/arch/arm/configs/mx21_defconfig
index 6454e18..411f88d 100644
--- a/arch/arm/configs/mx21_defconfig
+++ b/arch/arm/configs/mx21_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/mx27_defconfig b/arch/arm/configs/mx27_defconfig
index 813cfb3..9ad4c656 100644
--- a/arch/arm/configs/mx27_defconfig
+++ b/arch/arm/configs/mx27_defconfig
@@ -4,7 +4,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
diff --git a/arch/arm/configs/mx3_defconfig b/arch/arm/configs/mx3_defconfig
index e648ea3..7c4b30b 100644
--- a/arch/arm/configs/mx3_defconfig
+++ b/arch/arm/configs/mx3_defconfig
@@ -4,7 +4,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/mx51_defconfig b/arch/arm/configs/mx51_defconfig
index 5c7a872..9cba68c 100644
--- a/arch/arm/configs/mx51_defconfig
+++ b/arch/arm/configs/mx51_defconfig
@@ -3,7 +3,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=18
 CONFIG_RELAY=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
index 0e2dc26..37207d1 100644
--- a/arch/arm/configs/nhk8815_defconfig
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -7,7 +7,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index a350cc6..7b63462 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -6,7 +6,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_BASE_FULL is not set
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index ccedde1..ae890ca 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -6,7 +6,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_SLAB=y
diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig
index 439323b..a288d70 100644
--- a/arch/arm/configs/orion5x_defconfig
+++ b/arch/arm/configs/orion5x_defconfig
@@ -2,7 +2,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SLUB_DEBUG is not set
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
diff --git a/arch/arm/configs/pcm027_defconfig b/arch/arm/configs/pcm027_defconfig
index 583a061..2f136c3 100644
--- a/arch/arm/configs/pcm027_defconfig
+++ b/arch/arm/configs/pcm027_defconfig
@@ -7,7 +7,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/pcontrol_g20_defconfig b/arch/arm/configs/pcontrol_g20_defconfig
index b42ee62..c75c9fc 100644
--- a/arch/arm/configs/pcontrol_g20_defconfig
+++ b/arch/arm/configs/pcontrol_g20_defconfig
@@ -10,7 +10,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_NAMESPACES=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arm/configs/pleb_defconfig b/arch/arm/configs/pleb_defconfig
index d1efbdc..cb08cc5 100644
--- a/arch/arm/configs/pleb_defconfig
+++ b/arch/arm/configs/pleb_defconfig
@@ -3,7 +3,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 # CONFIG_SHMEM is not set
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/pnx4008_defconfig b/arch/arm/configs/pnx4008_defconfig
index bd481f0..35a31cc 100644
--- a/arch/arm/configs/pnx4008_defconfig
+++ b/arch/arm/configs/pnx4008_defconfig
@@ -5,7 +5,7 @@
 CONFIG_AUDIT=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/simpad_defconfig b/arch/arm/configs/simpad_defconfig
index af3b12e..d335815 100644
--- a/arch/arm/configs/simpad_defconfig
+++ b/arch/arm/configs/simpad_defconfig
@@ -2,7 +2,7 @@
 CONFIG_LOCALVERSION="oe1"
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/spitz_defconfig b/arch/arm/configs/spitz_defconfig
index aebd4bb..7015827 100644
--- a/arch/arm/configs/spitz_defconfig
+++ b/arch/arm/configs/spitz_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/stmp378x_defconfig b/arch/arm/configs/stmp378x_defconfig
index 94a2d90..1079c2b 100644
--- a/arch/arm/configs/stmp378x_defconfig
+++ b/arch/arm/configs/stmp378x_defconfig
@@ -5,7 +5,7 @@
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/stmp37xx_defconfig b/arch/arm/configs/stmp37xx_defconfig
index d8ee58c..564a5cc 100644
--- a/arch/arm/configs/stmp37xx_defconfig
+++ b/arch/arm/configs/stmp37xx_defconfig
@@ -5,7 +5,7 @@
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/arm/configs/tct_hammer_defconfig b/arch/arm/configs/tct_hammer_defconfig
index e89ca19..95c0f0d 100644
--- a/arch/arm/configs/tct_hammer_defconfig
+++ b/arch/arm/configs/tct_hammer_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_BUG is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/arm/configs/trizeps4_defconfig b/arch/arm/configs/trizeps4_defconfig
index 37f4834..3162173 100644
--- a/arch/arm/configs/trizeps4_defconfig
+++ b/arch/arm/configs/trizeps4_defconfig
@@ -7,7 +7,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
index c1c252c..4a5a126 100644
--- a/arch/arm/configs/u300_defconfig
+++ b/arch/arm/configs/u300_defconfig
@@ -3,7 +3,7 @@
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_AIO is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
 CONFIG_MODULES=y
diff --git a/arch/arm/configs/viper_defconfig b/arch/arm/configs/viper_defconfig
index 9d7bf5e..8b0c717 100644
--- a/arch/arm/configs/viper_defconfig
+++ b/arch/arm/configs/viper_defconfig
@@ -3,7 +3,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=13
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_ELF_CORE is not set
 # CONFIG_SHMEM is not set
 CONFIG_SLAB=y
diff --git a/arch/arm/configs/xcep_defconfig b/arch/arm/configs/xcep_defconfig
index 70d47db..5b55041 100644
--- a/arch/arm/configs/xcep_defconfig
+++ b/arch/arm/configs/xcep_defconfig
@@ -8,7 +8,7 @@
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 # CONFIG_SHMEM is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h
index a101f10..721847d 100644
--- a/arch/arm/include/asm/hardware/sp810.h
+++ b/arch/arm/include/asm/hardware/sp810.h
@@ -50,6 +50,12 @@
 #define SCPCELLID2		0xFF8
 #define SCPCELLID3		0xFFC
 
+#define SCCTRL_TIMEREN0SEL_REFCLK	(0 << 15)
+#define SCCTRL_TIMEREN0SEL_TIMCLK	(1 << 15)
+
+#define SCCTRL_TIMEREN1SEL_REFCLK	(0 << 17)
+#define SCCTRL_TIMEREN1SEL_TIMCLK	(1 << 17)
+
 static inline void sysctl_soft_reset(void __iomem *base)
 {
 	/* writing any value to SCSYSSTAT reg will reset system */
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 20e0f7c..d66605d 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -95,6 +95,15 @@
 	return (void __iomem *)addr;
 }
 
+/* IO barriers */
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#define __iormb()		rmb()
+#define __iowmb()		wmb()
+#else
+#define __iormb()		do { } while (0)
+#define __iowmb()		do { } while (0)
+#endif
+
 /*
  * Now, pick up the machine-defined IO definitions
  */
@@ -125,17 +134,17 @@
  * The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
  */
 #ifdef __io
-#define outb(v,p)		__raw_writeb(v,__io(p))
-#define outw(v,p)		__raw_writew((__force __u16) \
-					cpu_to_le16(v),__io(p))
-#define outl(v,p)		__raw_writel((__force __u32) \
-					cpu_to_le32(v),__io(p))
+#define outb(v,p)	({ __iowmb(); __raw_writeb(v,__io(p)); })
+#define outw(v,p)	({ __iowmb(); __raw_writew((__force __u16) \
+					cpu_to_le16(v),__io(p)); })
+#define outl(v,p)	({ __iowmb(); __raw_writel((__force __u32) \
+					cpu_to_le32(v),__io(p)); })
 
-#define inb(p)	({ __u8 __v = __raw_readb(__io(p)); __v; })
+#define inb(p)	({ __u8 __v = __raw_readb(__io(p)); __iormb(); __v; })
 #define inw(p)	({ __u16 __v = le16_to_cpu((__force __le16) \
-			__raw_readw(__io(p))); __v; })
+			__raw_readw(__io(p))); __iormb(); __v; })
 #define inl(p)	({ __u32 __v = le32_to_cpu((__force __le32) \
-			__raw_readl(__io(p))); __v; })
+			__raw_readl(__io(p))); __iormb(); __v; })
 
 #define outsb(p,d,l)		__raw_writesb(__io(p),d,l)
 #define outsw(p,d,l)		__raw_writesw(__io(p),d,l)
@@ -192,14 +201,6 @@
 #define writel_relaxed(v,c)	((void)__raw_writel((__force u32) \
 					cpu_to_le32(v),__mem_pci(c)))
 
-#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
-#define __iormb()		rmb()
-#define __iowmb()		wmb()
-#else
-#define __iormb()		do { } while (0)
-#define __iowmb()		do { } while (0)
-#endif
-
 #define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
 #define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
 #define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 23c2e8e..d0ee74b 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -188,7 +188,7 @@
  * translation for translating DMA addresses.  Use the driver
  * DMA support - see dma-mapping.h.
  */
-static inline unsigned long virt_to_phys(void *x)
+static inline unsigned long virt_to_phys(const volatile void *x)
 {
 	return __virt_to_phys((unsigned long)(x));
 }
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index f17d9a0..f06ff9f 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -391,25 +391,24 @@
 
 
 #ifdef CONFIG_SMP_ON_UP
+	__INIT
 __fixup_smp:
-	mov	r4, #0x00070000
-	orr	r3, r4, #0xff000000	@ mask 0xff070000
-	orr	r4, r4, #0x41000000	@ val 0x41070000
-	and	r0, r9, r3
-	teq	r0, r4			@ ARM CPU and ARMv6/v7?
+	and	r3, r9, #0x000f0000	@ architecture version
+	teq	r3, #0x000f0000		@ CPU ID supported?
 	bne	__fixup_smp_on_up	@ no, assume UP
 
-	orr	r3, r3, #0x0000ff00
-	orr	r3, r3, #0x000000f0	@ mask 0xff07fff0
+	bic	r3, r9, #0x00ff0000
+	bic	r3, r3, #0x0000000f	@ mask 0xff00fff0
+	mov	r4, #0x41000000
 	orr	r4, r4, #0x0000b000
-	orr	r4, r4, #0x00000020	@ val 0x4107b020
-	and	r0, r9, r3
-	teq	r0, r4			@ ARM 11MPCore?
+	orr	r4, r4, #0x00000020	@ val 0x4100b020
+	teq	r3, r4			@ ARM 11MPCore?
 	moveq	pc, lr			@ yes, assume SMP
 
 	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
-	tst	r0, #1 << 31
-	movne	pc, lr			@ bit 31 => SMP
+	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
+	teq	r0, #0x80000000		@ not part of a uniprocessor system?
+	moveq	pc, lr			@ yes, assume SMP
 
 __fixup_smp_on_up:
 	adr	r0, 1f
@@ -417,18 +416,7 @@
 	sub	r3, r0, r3
 	add	r4, r4, r3
 	add	r5, r5, r3
-2:	cmp	r4, r5
-	movhs	pc, lr
-	ldmia	r4!, {r0, r6}
- ARM(	str	r6, [r0, r3]	)
- THUMB(	add	r0, r0, r3	)
-#ifdef __ARMEB__
- THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
-#endif
- THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
- THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
- THUMB(	strh	r6, [r0]	)
-	b	2b
+	b	__do_fixup_smp_on_up
 ENDPROC(__fixup_smp)
 
 	.align
@@ -442,7 +430,31 @@
 	ALT_SMP(.long	1)
 	ALT_UP(.long	0)
 	.popsection
-
 #endif
 
+	.text
+__do_fixup_smp_on_up:
+	cmp	r4, r5
+	movhs	pc, lr
+	ldmia	r4!, {r0, r6}
+ ARM(	str	r6, [r0, r3]	)
+ THUMB(	add	r0, r0, r3	)
+#ifdef __ARMEB__
+ THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
+#endif
+ THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
+ THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
+ THUMB(	strh	r6, [r0]	)
+	b	__do_fixup_smp_on_up
+ENDPROC(__do_fixup_smp_on_up)
+
+ENTRY(fixup_smp)
+	stmfd	sp!, {r4 - r6, lr}
+	mov	r4, r0
+	add	r5, r0, r1
+	mov	r3, #0
+	bl	__do_fixup_smp_on_up
+	ldmfd	sp!, {r4 - r6, pc}
+ENDPROC(fixup_smp)
+
 #include "head-common.S"
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index c9f3f04..d600bd3 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -137,11 +137,10 @@
 	u32 didr;
 
 	/* Do we implement the extended CPUID interface? */
-	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
-		pr_warning("CPUID feature registers not supported. "
-				"Assuming v6 debug is present.\n");
+	if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
+	    "CPUID feature registers not supported. "
+	    "Assuming v6 debug is present.\n"))
 		return ARM_DEBUG_ARCH_V6;
-	}
 
 	ARM_DBG_READ(c0, 0, didr);
 	return (didr >> 16) & 0xf;
@@ -152,6 +151,12 @@
 	return debug_arch;
 }
 
+static int debug_arch_supported(void)
+{
+	u8 arch = get_debug_arch();
+	return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
+}
+
 /* Determine number of BRP register available. */
 static int get_num_brp_resources(void)
 {
@@ -268,6 +273,9 @@
 
 int hw_breakpoint_slots(int type)
 {
+	if (!debug_arch_supported())
+		return 0;
+
 	/*
 	 * We can be called early, so don't rely on
 	 * our static variables being initialised.
@@ -834,11 +842,11 @@
 
 	/*
 	 * v7 debug contains save and restore registers so that debug state
-	 * can be maintained across low-power modes without leaving
-	 * the debug logic powered up. It is IMPLEMENTATION DEFINED whether
-	 * we can write to the debug registers out of reset, so we must
-	 * unlock the OS Lock Access Register to avoid taking undefined
-	 * instruction exceptions later on.
+	 * can be maintained across low-power modes without leaving the debug
+	 * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
+	 * the debug registers out of reset, so we must unlock the OS Lock
+	 * Access Register to avoid taking undefined instruction exceptions
+	 * later on.
 	 */
 	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
 		/*
@@ -882,7 +890,7 @@
 
 	debug_arch = get_debug_arch();
 
-	if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
+	if (!debug_arch_supported()) {
 		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
 		return 0;
 	}
@@ -899,18 +907,18 @@
 		pr_info("%d breakpoint(s) reserved for watchpoint "
 				"single-step.\n", core_num_reserved_brps);
 
+	/*
+	 * Reset the breakpoint resources. We assume that a halting
+	 * debugger will leave the world in a nice state for us.
+	 */
+	on_each_cpu(reset_ctrl_regs, NULL, 1);
+
 	ARM_DBG_READ(c1, 0, dscr);
 	if (dscr & ARM_DSCR_HDBGEN) {
+		max_watchpoint_len = 4;
 		pr_warning("halting debug mode enabled. Assuming maximum "
-				"watchpoint size of 4 bytes.");
+			   "watchpoint size of %u bytes.", max_watchpoint_len);
 	} else {
-		/*
-		 * Reset the breakpoint resources. We assume that a halting
-		 * debugger will leave the world in a nice state for us.
-		 */
-		smp_call_function(reset_ctrl_regs, NULL, 1);
-		reset_ctrl_regs(NULL);
-
 		/* Work out the maximum supported watchpoint length. */
 		max_watchpoint_len = get_max_wp_len();
 		pr_info("maximum watchpoint size is %u bytes.\n",
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 2cfe816..6d4105e 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -22,6 +22,7 @@
 
 #include <asm/pgtable.h>
 #include <asm/sections.h>
+#include <asm/smp_plat.h>
 #include <asm/unwind.h>
 
 #ifdef CONFIG_XIP_KERNEL
@@ -268,12 +269,28 @@
 	const Elf_Shdr *txt_sec;
 };
 
+static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
+	const Elf_Shdr *sechdrs, const char *name)
+{
+	const Elf_Shdr *s, *se;
+	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
+		if (strcmp(name, secstrs + s->sh_name) == 0)
+			return s;
+
+	return NULL;
+}
+
+extern void fixup_smp(const void *, unsigned long);
+
 int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 		    struct module *mod)
 {
+	const Elf_Shdr * __maybe_unused s = NULL;
 #ifdef CONFIG_ARM_UNWIND
 	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-	const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+	const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
 	struct mod_unwind_map maps[ARM_SEC_MAX];
 	int i;
 
@@ -315,6 +332,9 @@
 					         maps[i].txt_sec->sh_addr,
 					         maps[i].txt_sec->sh_size);
 #endif
+	s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
+	if (s && !is_smp())
+		fixup_smp((void *)s->sh_addr, s->sh_size);
 	return 0;
 }
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5efa264..d150ad1 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -700,7 +700,7 @@
 	 * Frame pointers should strictly progress back up the stack
 	 * (towards higher addresses).
 	 */
-	if (tail >= buftail.fp)
+	if (tail + 1 >= buftail.fp)
 		return NULL;
 
 	return buftail.fp - 1;
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index fd91566..60636f4 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -36,6 +36,7 @@
 		/* timer load already set up */
 		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
 			| TWD_TIMER_CONTROL_PERIODIC;
+		__raw_writel(twd_timer_rate / HZ, twd_base + TWD_TIMER_LOAD);
 		break;
 	case CLOCK_EVT_MODE_ONESHOT:
 		/* period set, and timer enabled in 'next_event' hook */
@@ -81,7 +82,7 @@
 
 static void __cpuinit twd_calibrate_rate(void)
 {
-	unsigned long load, count;
+	unsigned long count;
 	u64 waitjiffies;
 
 	/*
@@ -116,10 +117,6 @@
 		printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
 			(twd_timer_rate / 1000000) % 100);
 	}
-
-	load = twd_timer_rate / HZ;
-
-	__raw_writel(load, twd_base + TWD_TIMER_LOAD);
 }
 
 /*
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index ffdf87b..8207954 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -838,7 +838,7 @@
 static struct resource ep93xx_ac97_resources[] = {
 	{
 		.start	= EP93XX_AAC_PHYS_BASE,
-		.end	= EP93XX_AAC_PHYS_BASE + 0xb0 - 1,
+		.end	= EP93XX_AAC_PHYS_BASE + 0xac - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	{
diff --git a/arch/arm/mach-ep93xx/gpio.c b/arch/arm/mach-ep93xx/gpio.c
index f3dc76f..bec34b8 100644
--- a/arch/arm/mach-ep93xx/gpio.c
+++ b/arch/arm/mach-ep93xx/gpio.c
@@ -427,6 +427,13 @@
 {
 	int i;
 
+	/* Set Ports C, D, E, G, and H for GPIO use */
+	ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS |
+				 EP93XX_SYSCON_DEVCFG_GONK |
+				 EP93XX_SYSCON_DEVCFG_EONIDE |
+				 EP93XX_SYSCON_DEVCFG_GONIDE |
+				 EP93XX_SYSCON_DEVCFG_HONIDE);
+
 	for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++)
 		gpiochip_add(&ep93xx_gpio_banks[i].chip);
 }
diff --git a/arch/arm/mach-footbridge/include/mach/debug-macro.S b/arch/arm/mach-footbridge/include/mach/debug-macro.S
index 3c9e0c4..30b971d 100644
--- a/arch/arm/mach-footbridge/include/mach/debug-macro.S
+++ b/arch/arm/mach-footbridge/include/mach/debug-macro.S
@@ -17,8 +17,8 @@
 	/* For NetWinder debugging */
 		.macro	addruart, rp, rv
 		mov	\rp, #0x000003f8
-		orr	\rv, \rp, #0x7c000000	@ physical
-		orr	\rp, \rp, #0xff000000	@ virtual
+		orr	\rv, \rp, #0xff000000	@ virtual
+		orr	\rp, \rp, #0x7c000000	@ physical
 		.endm
 
 #define UART_SHIFT	0
diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c
index aa76cfd..8382e79 100644
--- a/arch/arm/mach-imx/mach-mx25_3ds.c
+++ b/arch/arm/mach-imx/mach-mx25_3ds.c
@@ -180,7 +180,7 @@
 	KEY(3, 3, KEY_POWER),
 };
 
-static const struct matrix_keymap_data mx25pdk_keymap_data __initdata = {
+static const struct matrix_keymap_data mx25pdk_keymap_data __initconst = {
 	.keymap		= mx25pdk_keymap,
 	.keymap_size	= ARRAY_SIZE(mx25pdk_keymap),
 };
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 4dc68d6..9fd8942 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -432,7 +432,7 @@
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-unsigned long ixp4xx_timer_freq = FREQ;
+unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
 EXPORT_SYMBOL(ixp4xx_timer_freq);
 static void __init ixp4xx_clocksource_init(void)
 {
@@ -496,7 +496,7 @@
 
 static void __init ixp4xx_clockevent_init(void)
 {
-	clockevent_ixp4xx.mult = div_sc(FREQ, NSEC_PER_SEC,
+	clockevent_ixp4xx.mult = div_sc(IXP4XX_TIMER_FREQ, NSEC_PER_SEC,
 					clockevent_ixp4xx.shift);
 	clockevent_ixp4xx.max_delta_ns =
 		clockevent_delta2ns(0xfffffffe, &clockevent_ixp4xx);
diff --git a/arch/arm/mach-ixp4xx/include/mach/timex.h b/arch/arm/mach-ixp4xx/include/mach/timex.h
index 2c3f93c..c9e930f 100644
--- a/arch/arm/mach-ixp4xx/include/mach/timex.h
+++ b/arch/arm/mach-ixp4xx/include/mach/timex.h
@@ -10,6 +10,7 @@
  * 66.66... MHz. We do a convoluted calculation of CLOCK_TICK_RATE because the
  * timer register ignores the bottom 2 bits of the LATCH value.
  */
-#define FREQ 66666000
-#define CLOCK_TICK_RATE (((FREQ / HZ & ~IXP4XX_OST_RELOAD_MASK) + 1) * HZ)
+#define IXP4XX_TIMER_FREQ 66666000
+#define CLOCK_TICK_RATE \
+	(((IXP4XX_TIMER_FREQ / HZ & ~IXP4XX_OST_RELOAD_MASK) + 1) * HZ)
 
diff --git a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
index bfdbe4b..852f7c9 100644
--- a/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
+++ b/arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
@@ -265,6 +265,11 @@
 	       qmgr_queue_descs[queue], queue);
 	qmgr_queue_descs[queue][0] = '\x0';
 #endif
+
+	while ((addr = qmgr_get_entry(queue)))
+		printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
+		       queue, addr);
+
 	__raw_writel(0, &qmgr_regs->sram[queue]);
 
 	used_sram_bitmap[0] &= ~mask[0];
@@ -275,10 +280,6 @@
 	spin_unlock_irq(&qmgr_lock);
 
 	module_put(THIS_MODULE);
-
-	while ((addr = qmgr_get_entry(queue)))
-		printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
-		       queue, addr);
 }
 
 static int qmgr_init(void)
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
index 2e83913..6dde818 100644
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -43,7 +43,7 @@
  * at run-time: they vary from board to board, and the true
  * configuration won't be known until boot.
  */
-static struct resource smc91x_resources[] __initdata = {
+static struct resource smc91x_resources[] = {
 	[0] = {
 		.flags = IORESOURCE_MEM,
 	},
@@ -52,7 +52,7 @@
 	},
 };
 
-static struct platform_device smc91x_device __initdata = {
+static struct platform_device smc91x_device = {
 	.name           = "smc91x",
 	.id             = 0,
 	.num_resources  = ARRAY_SIZE(smc91x_resources),
diff --git a/arch/arm/mach-mxs/clock-mx23.c b/arch/arm/mach-mxs/clock-mx23.c
index b1a362e..ca72a05 100644
--- a/arch/arm/mach-mxs/clock-mx23.c
+++ b/arch/arm/mach-mxs/clock-mx23.c
@@ -304,7 +304,7 @@
 	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);		\
 	reg &= ~BM_CLKCTRL_##dr##_DIV;					\
 	reg |= div << BP_CLKCTRL_##dr##_DIV;				\
-	if (reg | (1 << clk->enable_shift)) {				\
+	if (reg & (1 << clk->enable_shift)) {				\
 		pr_err("%s: clock is gated\n", __func__);		\
 		return -EINVAL;						\
 	}								\
@@ -347,7 +347,7 @@
 {									\
 	if (parent != clk->parent) {					\
 		__raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit,		\
-			 HW_CLKCTRL_CLKSEQ_TOG);			\
+			 CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG);	\
 		clk->parent = parent;					\
 	}								\
 									\
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c
index 56312c0..fd1c4c5 100644
--- a/arch/arm/mach-mxs/clock-mx28.c
+++ b/arch/arm/mach-mxs/clock-mx28.c
@@ -355,12 +355,12 @@
 	} else {							\
 		reg &= ~BM_CLKCTRL_##dr##_DIV;				\
 		reg |= div << BP_CLKCTRL_##dr##_DIV;			\
-		if (reg | (1 << clk->enable_shift)) {			\
+		if (reg & (1 << clk->enable_shift)) {			\
 			pr_err("%s: clock is gated\n", __func__);	\
 			return -EINVAL;					\
 		}							\
 	}								\
-	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);		\
+	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr);		\
 									\
 	for (i = 10000; i; i--)						\
 		if (!(__raw_readl(CLKCTRL_BASE_ADDR +			\
@@ -483,7 +483,7 @@
 {									\
 	if (parent != clk->parent) {					\
 		__raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit,		\
-			 HW_CLKCTRL_CLKSEQ_TOG);			\
+			 CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG);	\
 		clk->parent = parent;					\
 	}								\
 									\
@@ -609,7 +609,6 @@
 	_REGISTER_CLOCK("duart", NULL, uart_clk)
 	_REGISTER_CLOCK("imx28-fec.0", NULL, fec_clk)
 	_REGISTER_CLOCK("imx28-fec.1", NULL, fec_clk)
-	_REGISTER_CLOCK("fec.0", NULL, fec_clk)
 	_REGISTER_CLOCK("rtc", NULL, rtc_clk)
 	_REGISTER_CLOCK("pll2", NULL, pll2_clk)
 	_REGISTER_CLOCK(NULL, "hclk", hbus_clk)
diff --git a/arch/arm/mach-mxs/clock.c b/arch/arm/mach-mxs/clock.c
index e7d2269..a7093c8 100644
--- a/arch/arm/mach-mxs/clock.c
+++ b/arch/arm/mach-mxs/clock.c
@@ -57,7 +57,6 @@
 		if (clk->disable)
 			clk->disable(clk);
 		__clk_disable(clk->parent);
-		__clk_disable(clk->secondary);
 	}
 }
 
@@ -68,7 +67,6 @@
 
 	if (clk->usecount++ == 0) {
 		__clk_enable(clk->parent);
-		__clk_enable(clk->secondary);
 
 		if (clk->enable)
 			clk->enable(clk);
diff --git a/arch/arm/mach-mxs/gpio.c b/arch/arm/mach-mxs/gpio.c
index d7ad7a6..cb0c0e8 100644
--- a/arch/arm/mach-mxs/gpio.c
+++ b/arch/arm/mach-mxs/gpio.c
@@ -139,6 +139,8 @@
 	struct mxs_gpio_port *port = (struct mxs_gpio_port *)get_irq_data(irq);
 	u32 gpio_irq_no_base = port->virtual_irq_start;
 
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
+
 	irq_stat = __raw_readl(port->base + PINCTRL_IRQSTAT(port->id)) &
 			__raw_readl(port->base + PINCTRL_IRQEN(port->id));
 
diff --git a/arch/arm/mach-mxs/include/mach/clock.h b/arch/arm/mach-mxs/include/mach/clock.h
index 041e276..592c9ab 100644
--- a/arch/arm/mach-mxs/include/mach/clock.h
+++ b/arch/arm/mach-mxs/include/mach/clock.h
@@ -29,8 +29,6 @@
 	int id;
 	/* Source clock this clk depends on */
 	struct clk *parent;
-	/* Secondary clock to enable/disable with this clock */
-	struct clk *secondary;
 	/* Reference count of clock enable/disable */
 	__s8 usecount;
 	/* Register bit position for clock's enable/disable control. */
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index 8d2f2da..e0a0281 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -9,6 +9,7 @@
 	depends on ARCH_OMAP1
 	bool "OMAP730 Based System"
 	select CPU_ARM926T
+	select OMAP_MPU_TIMER
 	select ARCH_OMAP_OTG
 
 config ARCH_OMAP850
@@ -22,6 +23,7 @@
 	default y
 	bool "OMAP15xx Based System"
 	select CPU_ARM925T
+	select OMAP_MPU_TIMER
 
 config ARCH_OMAP16XX
 	depends on ARCH_OMAP1
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index 6ee1950..ba6009f 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -3,12 +3,11 @@
 #
 
 # Common support
-obj-y := io.o id.o sram.o irq.o mux.o flash.o serial.o devices.o dma.o
+obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o
 obj-y += clock.o clock_data.o opp_data.o
 
 obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
 
-obj-$(CONFIG_OMAP_MPU_TIMER)	+= time.o
 obj-$(CONFIG_OMAP_32K_TIMER)	+= timer32k.o
 
 # Power Management
diff --git a/arch/arm/mach-omap1/include/mach/entry-macro.S b/arch/arm/mach-omap1/include/mach/entry-macro.S
index c9be6d4..bfb4fb1 100644
--- a/arch/arm/mach-omap1/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap1/include/mach/entry-macro.S
@@ -14,19 +14,6 @@
 #include <mach/irqs.h>
 #include <asm/hardware/gic.h>
 
-/*
- * We use __glue to avoid errors with multiple definitions of
- * .globl omap_irq_flags as it's included from entry-armv.S but not
- * from entry-common.S.
- */
-#ifdef __glue
-		.pushsection .data
-		.globl	omap_irq_flags
-omap_irq_flags:
-		.word	0
-		.popsection
-#endif
-
  		.macro	disable_fiq
 		.endm
 
diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c
index 4770158..731dd33 100644
--- a/arch/arm/mach-omap1/irq.c
+++ b/arch/arm/mach-omap1/irq.c
@@ -57,6 +57,7 @@
 	unsigned long wake_enable;
 };
 
+u32 omap_irq_flags;
 static unsigned int irq_bank_count;
 static struct omap_irq_bank *irq_banks;
 
@@ -176,7 +177,6 @@
 
 void __init omap_init_irq(void)
 {
-	extern unsigned int omap_irq_flags;
 	int i, j;
 
 #if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
diff --git a/arch/arm/mach-omap1/lcd_dma.c b/arch/arm/mach-omap1/lcd_dma.c
index c9088d8..4538093 100644
--- a/arch/arm/mach-omap1/lcd_dma.c
+++ b/arch/arm/mach-omap1/lcd_dma.c
@@ -37,7 +37,7 @@
 	 * On OMAP1510, internal LCD controller will start the transfer
 	 * when it gets enabled, so assume DMA running if LCD enabled.
 	 */
-	if (cpu_is_omap1510())
+	if (cpu_is_omap15xx())
 		if (omap_readw(OMAP_LCDC_CONTROL) & OMAP_LCDC_CTRL_LCD_EN)
 			return 1;
 
@@ -95,7 +95,7 @@
 
 void omap_set_lcd_dma_b1_rotation(int rotate)
 {
-	if (cpu_is_omap1510()) {
+	if (cpu_is_omap15xx()) {
 		printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n");
 		BUG();
 		return;
@@ -106,7 +106,7 @@
 
 void omap_set_lcd_dma_b1_mirror(int mirror)
 {
-	if (cpu_is_omap1510()) {
+	if (cpu_is_omap15xx()) {
 		printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n");
 		BUG();
 	}
@@ -116,7 +116,7 @@
 
 void omap_set_lcd_dma_b1_vxres(unsigned long vxres)
 {
-	if (cpu_is_omap1510()) {
+	if (cpu_is_omap15xx()) {
 		printk(KERN_ERR "DMA virtual resulotion is not supported "
 				"in 1510 mode\n");
 		BUG();
@@ -127,7 +127,7 @@
 
 void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale)
 {
-	if (cpu_is_omap1510()) {
+	if (cpu_is_omap15xx()) {
 		printk(KERN_ERR "DMA scale is not supported in 1510 mode\n");
 		BUG();
 	}
@@ -177,7 +177,7 @@
 			bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
 			/* 1510 DMA requires the bottom address to be 2 more
 			 * than the actual last memory access location. */
-			if (cpu_is_omap1510() &&
+			if (cpu_is_omap15xx() &&
 				lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32)
 					bottom += 2;
 			ei = PIXSTEP(0, 0, 1, 0);
@@ -241,7 +241,7 @@
 		return;	/* Suppress warning about uninitialized vars */
 	}
 
-	if (cpu_is_omap1510()) {
+	if (cpu_is_omap15xx()) {
 		omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U);
 		omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L);
 		omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U);
@@ -343,7 +343,7 @@
 		BUG();
 		return;
 	}
-	if (!cpu_is_omap1510())
+	if (!cpu_is_omap15xx())
 		omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1,
 			    OMAP1610_DMA_LCD_CCR);
 	lcd_dma.reserved = 0;
@@ -360,7 +360,7 @@
 	 * connected. Otherwise the OMAP internal controller will
 	 * start the transfer when it gets enabled.
 	 */
-	if (cpu_is_omap1510() || !lcd_dma.ext_ctrl)
+	if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl)
 		return;
 
 	w = omap_readw(OMAP1610_DMA_LCD_CTRL);
@@ -378,14 +378,14 @@
 void omap_setup_lcd_dma(void)
 {
 	BUG_ON(lcd_dma.active);
-	if (!cpu_is_omap1510()) {
+	if (!cpu_is_omap15xx()) {
 		/* Set some reasonable defaults */
 		omap_writew(0x5440, OMAP1610_DMA_LCD_CCR);
 		omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP);
 		omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL);
 	}
 	set_b1_regs();
-	if (!cpu_is_omap1510()) {
+	if (!cpu_is_omap15xx()) {
 		u16 w;
 
 		w = omap_readw(OMAP1610_DMA_LCD_CCR);
@@ -407,7 +407,7 @@
 	u16 w;
 
 	lcd_dma.active = 0;
-	if (cpu_is_omap1510() || !lcd_dma.ext_ctrl)
+	if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl)
 		return;
 
 	w = omap_readw(OMAP1610_DMA_LCD_CCR);
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index ed7a61f..6885d2f 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -49,11 +49,15 @@
 #include <mach/hardware.h>
 #include <asm/leds.h>
 #include <asm/irq.h>
+#include <asm/sched_clock.h>
+
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 
 #include <plat/common.h>
 
+#ifdef CONFIG_OMAP_MPU_TIMER
+
 #define OMAP_MPU_TIMER_BASE		OMAP_MPU_TIMER1_BASE
 #define OMAP_MPU_TIMER_OFFSET		0x100
 
@@ -67,7 +71,7 @@
 ((volatile omap_mpu_timer_regs_t*)OMAP1_IO_ADDRESS(OMAP_MPU_TIMER_BASE +	\
 				 (n)*OMAP_MPU_TIMER_OFFSET))
 
-static inline unsigned long omap_mpu_timer_read(int nr)
+static inline unsigned long notrace omap_mpu_timer_read(int nr)
 {
 	volatile omap_mpu_timer_regs_t* timer = omap_mpu_timer_base(nr);
 	return timer->read_tim;
@@ -212,6 +216,32 @@
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static DEFINE_CLOCK_DATA(cd);
+
+static inline unsigned long long notrace _omap_mpu_sched_clock(void)
+{
+	u32 cyc = mpu_read(&clocksource_mpu);
+	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+}
+
+#ifndef CONFIG_OMAP_32K_TIMER
+unsigned long long notrace sched_clock(void)
+{
+	return _omap_mpu_sched_clock();
+}
+#else
+static unsigned long long notrace omap_mpu_sched_clock(void)
+{
+	return _omap_mpu_sched_clock();
+}
+#endif
+
+static void notrace mpu_update_sched_clock(void)
+{
+	u32 cyc = mpu_read(&clocksource_mpu);
+	update_sched_clock(&cd, cyc, (u32)~0);
+}
+
 static void __init omap_init_clocksource(unsigned long rate)
 {
 	static char err[] __initdata = KERN_ERR
@@ -219,17 +249,13 @@
 
 	setup_irq(INT_TIMER2, &omap_mpu_timer2_irq);
 	omap_mpu_timer_start(1, ~0, 1);
+	init_sched_clock(&cd, mpu_update_sched_clock, 32, rate);
 
 	if (clocksource_register_hz(&clocksource_mpu, rate))
 		printk(err, clocksource_mpu.name);
 }
 
-/*
- * ---------------------------------------------------------------------------
- * Timer initialization
- * ---------------------------------------------------------------------------
- */
-static void __init omap_timer_init(void)
+static void __init omap_mpu_timer_init(void)
 {
 	struct clk	*ck_ref = clk_get(NULL, "ck_ref");
 	unsigned long	rate;
@@ -246,6 +272,66 @@
 	omap_init_clocksource(rate);
 }
 
+#else
+static inline void omap_mpu_timer_init(void)
+{
+	pr_err("Bogus timer, should not happen\n");
+}
+#endif	/* CONFIG_OMAP_MPU_TIMER */
+
+#if defined(CONFIG_OMAP_MPU_TIMER) && defined(CONFIG_OMAP_32K_TIMER)
+static unsigned long long (*preferred_sched_clock)(void);
+
+unsigned long long notrace sched_clock(void)
+{
+	if (!preferred_sched_clock)
+		return 0;
+
+	return preferred_sched_clock();
+}
+
+static inline void preferred_sched_clock_init(bool use_32k_sched_clock)
+{
+	if (use_32k_sched_clock)
+		preferred_sched_clock = omap_32k_sched_clock;
+	else
+		preferred_sched_clock = omap_mpu_sched_clock;
+}
+#else
+static inline void preferred_sched_clock_init(bool use_32k_sched_clock)
+{
+}
+#endif
+
+static inline int omap_32k_timer_usable(void)
+{
+	int res = false;
+
+	if (cpu_is_omap730() || cpu_is_omap15xx())
+		return res;
+
+#ifdef CONFIG_OMAP_32K_TIMER
+	res = omap_32k_timer_init();
+#endif
+
+	return res;
+}
+
+/*
+ * ---------------------------------------------------------------------------
+ * Timer initialization
+ * ---------------------------------------------------------------------------
+ */
+static void __init omap_timer_init(void)
+{
+	if (omap_32k_timer_usable()) {
+		preferred_sched_clock_init(1);
+	} else {
+		omap_mpu_timer_init();
+		preferred_sched_clock_init(0);
+	}
+}
+
 struct sys_timer omap_timer = {
 	.init		= omap_timer_init,
 };
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 20cfbcc..13d7b8f 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -52,10 +52,9 @@
 #include <asm/irq.h>
 #include <asm/mach/irq.h>
 #include <asm/mach/time.h>
+#include <plat/common.h>
 #include <plat/dmtimer.h>
 
-struct sys_timer omap_timer;
-
 /*
  * ---------------------------------------------------------------------------
  * 32KHz OS timer
@@ -181,14 +180,14 @@
  * Timer initialization
  * ---------------------------------------------------------------------------
  */
-static void __init omap_timer_init(void)
+bool __init omap_32k_timer_init(void)
 {
+	omap_init_clocksource_32k();
+
 #ifdef CONFIG_OMAP_DM_TIMER
 	omap_dm_timer_init();
 #endif
 	omap_init_32k_timer();
-}
 
-struct sys_timer omap_timer = {
-	.init		= omap_timer_init,
-};
+	return true;
+}
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index 5b0c777..8f9a64d 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -124,8 +124,9 @@
 #if defined(CONFIG_RTC_DRV_V3020) || defined(CONFIG_RTC_DRV_V3020_MODULE)
 #define RTC_IO_GPIO		(153)
 #define RTC_WR_GPIO		(154)
-#define RTC_RD_GPIO		(160)
+#define RTC_RD_GPIO		(53)
 #define RTC_CS_GPIO		(163)
+#define RTC_CS_EN_GPIO		(160)
 
 struct v3020_platform_data cm_t3517_v3020_pdata = {
 	.use_gpio	= 1,
@@ -145,6 +146,16 @@
 
 static void __init cm_t3517_init_rtc(void)
 {
+	int err;
+
+	err = gpio_request(RTC_CS_EN_GPIO, "rtc cs en");
+	if (err) {
+		pr_err("CM-T3517: rtc cs en gpio request failed: %d\n", err);
+		return;
+	}
+
+	gpio_direction_output(RTC_CS_EN_GPIO, 1);
+
 	platform_device_register(&cm_t3517_rtc_device);
 }
 #else
@@ -214,12 +225,12 @@
 	},
 	{
 		.name           = "linux",
-		.offset         = MTDPART_OFS_APPEND,	/* Offset = 0x280000 */
+		.offset         = MTDPART_OFS_APPEND,	/* Offset = 0x2A0000 */
 		.size           = 32 * NAND_BLOCK_SIZE,
 	},
 	{
 		.name           = "rootfs",
-		.offset         = MTDPART_OFS_APPEND,	/* Offset = 0x680000 */
+		.offset         = MTDPART_OFS_APPEND,	/* Offset = 0x6A0000 */
 		.size           = MTDPART_SIZ_FULL,
 	},
 };
@@ -256,11 +267,19 @@
 static struct omap_board_mux board_mux[] __initdata = {
 	/* GPIO186 - Green LED */
 	OMAP3_MUX(SYS_CLKOUT2, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
-	/* RTC GPIOs: IO, WR#, RD#, CS# */
+
+	/* RTC GPIOs: */
+	/* IO - GPIO153 */
 	OMAP3_MUX(MCBSP4_DR, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+	/* WR# - GPIO154 */
 	OMAP3_MUX(MCBSP4_DX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
-	OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+	/* RD# - GPIO53 */
+	OMAP3_MUX(GPMC_NCS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+	/* CS# - GPIO163 */
 	OMAP3_MUX(UART3_CTS_RCTX, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+	/* CS EN - GPIO160 */
+	OMAP3_MUX(MCBSP_CLKS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
+
 	/* HSUSB1 RESET */
 	OMAP3_MUX(UART2_TX, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
 	/* HSUSB2 RESET */
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 00bb1fc..9a2a31e 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -115,9 +115,6 @@
 
 static int devkit8000_panel_enable_lcd(struct omap_dss_device *dssdev)
 {
-	twl_i2c_write_u8(TWL4030_MODULE_GPIO, 0x80, REG_GPIODATADIR1);
-	twl_i2c_write_u8(TWL4030_MODULE_LED, 0x0, 0x0);
-
 	if (gpio_is_valid(dssdev->reset_gpio))
 		gpio_set_value_cansleep(dssdev->reset_gpio, 1);
 	return 0;
@@ -247,6 +244,8 @@
 static int devkit8000_twl_gpio_setup(struct device *dev,
 		unsigned gpio, unsigned ngpio)
 {
+	int ret;
+
 	omap_mux_init_gpio(29, OMAP_PIN_INPUT);
 	/* gpio + 0 is "mmc0_cd" (input/IRQ) */
 	mmc[0].gpio_cd = gpio + 0;
@@ -255,17 +254,23 @@
 	/* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
 	gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
 
-        /* gpio + 1 is "LCD_PWREN" (out, active high) */
-	devkit8000_lcd_device.reset_gpio = gpio + 1;
-	gpio_request(devkit8000_lcd_device.reset_gpio, "LCD_PWREN");
-	/* Disable until needed */
-	gpio_direction_output(devkit8000_lcd_device.reset_gpio, 0);
+	/* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */
+	devkit8000_lcd_device.reset_gpio = gpio + TWL4030_GPIO_MAX + 0;
+	ret = gpio_request_one(devkit8000_lcd_device.reset_gpio,
+			GPIOF_DIR_OUT | GPIOF_INIT_LOW, "LCD_PWREN");
+	if (ret < 0) {
+		devkit8000_lcd_device.reset_gpio = -EINVAL;
+		printk(KERN_ERR "Failed to request GPIO for LCD_PWRN\n");
+	}
 
 	/* gpio + 7 is "DVI_PD" (out, active low) */
 	devkit8000_dvi_device.reset_gpio = gpio + 7;
-	gpio_request(devkit8000_dvi_device.reset_gpio, "DVI PowerDown");
-	/* Disable until needed */
-	gpio_direction_output(devkit8000_dvi_device.reset_gpio, 0);
+	ret = gpio_request_one(devkit8000_dvi_device.reset_gpio,
+			GPIOF_DIR_OUT | GPIOF_INIT_LOW, "DVI PowerDown");
+	if (ret < 0) {
+		devkit8000_dvi_device.reset_gpio = -EINVAL;
+		printk(KERN_ERR "Failed to request GPIO for DVI PowerDown\n");
+	}
 
 	return 0;
 }
@@ -275,8 +280,7 @@
 	.irq_base	= TWL4030_GPIO_IRQ_BASE,
 	.irq_end	= TWL4030_GPIO_IRQ_END,
 	.use_leds	= true,
-	.pullups	= BIT(1),
-	.pulldowns	= BIT(2) | BIT(6) | BIT(7) | BIT(8) | BIT(13)
+	.pulldowns	= BIT(1) | BIT(2) | BIT(6) | BIT(8) | BIT(13)
 				| BIT(15) | BIT(16) | BIT(17),
 	.setup		= devkit8000_twl_gpio_setup,
 };
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index e001a04..e944025 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -409,8 +409,6 @@
 	platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
 	omap_serial_init();
 	omap4_twl6030_hsmmc_init(mmc);
-	/* OMAP4 Panda uses internal transceiver so register nop transceiver */
-	usb_nop_xceiv_register();
 	omap4_ehci_init();
 	usb_musb_init(&musb_board_data);
 }
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
index cb77be7..39a71bb 100644
--- a/arch/arm/mach-omap2/board-rm680.c
+++ b/arch/arm/mach-omap2/board-rm680.c
@@ -40,9 +40,6 @@
 static struct regulator_init_data rm680_vemmc = {
 	.constraints =	{
 		.name			= "rm680_vemmc",
-		.min_uV			= 2900000,
-		.max_uV			= 2900000,
-		.apply_uV		= 1,
 		.valid_modes_mask	= REGULATOR_MODE_NORMAL
 					| REGULATOR_MODE_STANDBY,
 		.valid_ops_mask		= REGULATOR_CHANGE_STATUS
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index e8cb32f..de9ec8d 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -34,7 +34,6 @@
 #include "cm2_44xx.h"
 #include "cm-regbits-44xx.h"
 #include "prm44xx.h"
-#include "prm44xx.h"
 #include "prm-regbits-44xx.h"
 #include "control.h"
 #include "scrm44xx.h"
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index e20b986..58e42f7 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -423,6 +423,12 @@
 {
 	struct clkdm_dep *cd;
 
+	if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+		pr_err("clockdomain: %s/%s: %s: not yet implemented\n",
+		       clkdm1->name, clkdm2->name, __func__);
+		return -EINVAL;
+	}
+
 	if (!clkdm1 || !clkdm2)
 		return -EINVAL;
 
@@ -458,6 +464,12 @@
 {
 	struct clkdm_dep *cd;
 
+	if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+		pr_err("clockdomain: %s/%s: %s: not yet implemented\n",
+		       clkdm1->name, clkdm2->name, __func__);
+		return -EINVAL;
+	}
+
 	if (!clkdm1 || !clkdm2)
 		return -EINVAL;
 
@@ -500,6 +512,12 @@
 	if (!clkdm1 || !clkdm2)
 		return -EINVAL;
 
+	if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+		pr_err("clockdomain: %s/%s: %s: not yet implemented\n",
+		       clkdm1->name, clkdm2->name, __func__);
+		return -EINVAL;
+	}
+
 	cd = _clkdm_deps_lookup(clkdm2, clkdm1->wkdep_srcs);
 	if (IS_ERR(cd)) {
 		pr_debug("clockdomain: hardware cannot set/clear wake up of "
@@ -527,6 +545,12 @@
 	struct clkdm_dep *cd;
 	u32 mask = 0;
 
+	if (!cpu_is_omap24xx() && !cpu_is_omap34xx()) {
+		pr_err("clockdomain: %s: %s: not yet implemented\n",
+		       clkdm->name, __func__);
+		return -EINVAL;
+	}
+
 	if (!clkdm)
 		return -EINVAL;
 
@@ -830,8 +854,7 @@
 	 * dependency code and data for OMAP4.
 	 */
 	if (cpu_is_omap44xx()) {
-		WARN_ONCE(1, "clockdomain: OMAP4 wakeup/sleep dependency "
-			  "support is not yet implemented\n");
+		pr_err("clockdomain: %s: OMAP4 wakeup/sleep dependency support: not yet implemented\n", clkdm->name);
 	} else {
 		if (atomic_read(&clkdm->usecount) > 0)
 			_clkdm_add_autodeps(clkdm);
@@ -872,8 +895,7 @@
 	 * dependency code and data for OMAP4.
 	 */
 	if (cpu_is_omap44xx()) {
-		WARN_ONCE(1, "clockdomain: OMAP4 wakeup/sleep dependency "
-			  "support is not yet implemented\n");
+		pr_err("clockdomain: %s: OMAP4 wakeup/sleep dependency support: not yet implemented\n", clkdm->name);
 	} else {
 		if (atomic_read(&clkdm->usecount) > 0)
 			_clkdm_del_autodeps(clkdm);
diff --git a/arch/arm/mach-omap2/clockdomains44xx_data.c b/arch/arm/mach-omap2/clockdomains44xx_data.c
index 51920fc..10622c9 100644
--- a/arch/arm/mach-omap2/clockdomains44xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains44xx_data.c
@@ -30,8 +30,6 @@
 #include "cm1_44xx.h"
 #include "cm2_44xx.h"
 
-#include "cm1_44xx.h"
-#include "cm2_44xx.h"
 #include "cm-regbits-44xx.h"
 #include "prm44xx.h"
 #include "prcm44xx.h"
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c
index d2f15f5..34922b2 100644
--- a/arch/arm/mach-omap2/dma.c
+++ b/arch/arm/mach-omap2/dma.c
@@ -264,7 +264,7 @@
 	if (IS_ERR(od)) {
 		pr_err("%s: Cant build omap_device for %s:%s.\n",
 			__func__, name, oh->name);
-		return IS_ERR(od);
+		return PTR_ERR(od);
 	}
 
 	mem = platform_get_resource(&od->pdev, IORESOURCE_MEM, 0);
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S
index befa321..81985a6 100644
--- a/arch/arm/mach-omap2/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap2/include/mach/entry-macro.S
@@ -38,20 +38,6 @@
  */
 
 #ifdef MULTI_OMAP2
-
-/*
- * We use __glue to avoid errors with multiple definitions of
- * .globl omap_irq_base as it's included from entry-armv.S but not
- * from entry-common.S.
- */
-#ifdef __glue
-		.pushsection .data
-		.globl	omap_irq_base
-omap_irq_base:
-		.word	0
-		.popsection
-#endif
-
 		/*
 		 * Configure the interrupt base on the first interrupt.
 		 * See also omap_irq_base_init for setting omap_irq_base.
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index e66687b..c203204 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -314,14 +314,13 @@
 	return omap_hwmod_set_postsetup_state(oh, *(u8 *)data);
 }
 
+void __iomem *omap_irq_base;
+
 /*
  * Initialize asm_irq_base for entry-macro.S
  */
 static inline void omap_irq_base_init(void)
 {
-	extern void __iomem *omap_irq_base;
-
-#ifdef MULTI_OMAP2
 	if (cpu_is_omap24xx())
 		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP24XX_IC_BASE);
 	else if (cpu_is_omap34xx())
@@ -330,7 +329,6 @@
 		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_GIC_CPU_BASE);
 	else
 		pr_err("Could not initialize omap_irq_base\n");
-#endif
 }
 
 void __init omap2_init_common_infrastructure(void)
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index df8d2f2..98148b6 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -160,7 +160,7 @@
 	struct omap_mux *mux = NULL;
 	struct omap_mux_entry *e;
 	const char *mode_name;
-	int found = 0, found_mode, mode0_len = 0;
+	int found = 0, found_mode = 0, mode0_len = 0;
 	struct list_head *muxmodes = &partition->muxmodes;
 
 	mode_name = strchr(muxname, '.');
@@ -1000,6 +1000,7 @@
 	if (!partition->base) {
 		pr_err("%s: Could not ioremap mux partition at 0x%08x\n",
 			__func__, partition->phys);
+		kfree(partition);
 		return -ENODEV;
 	}
 
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index 9e5dc8e..97feb3a 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -134,7 +134,7 @@
 
 	/* Block console output in case it is on one of the OMAP UARTs */
 	if (!is_suspending())
-		if (try_acquire_console_sem())
+		if (!console_trylock())
 			goto no_sleep;
 
 	omap_uart_prepare_idle(0);
@@ -151,7 +151,7 @@
 	omap_uart_resume_idle(0);
 
 	if (!is_suspending())
-		release_console_sem();
+		console_unlock();
 
 no_sleep:
 	if (omap2_pm_debug) {
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 8cbbead..2f864e4 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -168,9 +168,10 @@
  * once during boot sequence, but this works as we are not using secure
  * services.
  */
-static void omap3_save_secure_ram_context(u32 target_mpu_state)
+static void omap3_save_secure_ram_context(void)
 {
 	u32 ret;
+	int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
 
 	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
 		/*
@@ -181,7 +182,7 @@
 		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
 		ret = _omap_save_secure_sram((u32 *)
 				__pa(omap3_secure_ram_storage));
-		pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state);
+		pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
 		/* Following is for error tracking, it should not happen */
 		if (ret) {
 			printk(KERN_ERR "save_secure_sram() returns %08x\n",
@@ -398,7 +399,7 @@
 	if (!is_suspending())
 		if (per_next_state < PWRDM_POWER_ON ||
 		    core_next_state < PWRDM_POWER_ON)
-			if (try_acquire_console_sem())
+			if (!console_trylock())
 				goto console_still_active;
 
 	/* PER */
@@ -481,7 +482,7 @@
 	}
 
 	if (!is_suspending())
-		release_console_sem();
+		console_unlock();
 
 console_still_active:
 	/* Disable IO-PAD and IO-CHAIN wakeup */
@@ -1094,7 +1095,7 @@
 		local_fiq_disable();
 
 		omap_dma_global_context_save();
-		omap3_save_secure_ram_context(PWRDM_POWER_ON);
+		omap3_save_secure_ram_context();
 		omap_dma_global_context_restore();
 
 		local_irq_enable();
diff --git a/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c b/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c
index d523389..cf600e2 100644
--- a/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/powerdomain2xxx_3xxx.c
@@ -19,7 +19,6 @@
 #include <plat/prcm.h>
 
 #include "powerdomain.h"
-#include "prm-regbits-34xx.h"
 #include "prm.h"
 #include "prm-regbits-24xx.h"
 #include "prm-regbits-34xx.h"
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 302da74..32e91a9 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -812,7 +812,7 @@
 
 	oh->dev_attr = uart;
 
-	acquire_console_sem(); /* in case the earlycon is on the UART */
+	console_lock(); /* in case the earlycon is on the UART */
 
 	/*
 	 * Because of early UART probing, UART did not get idled
@@ -838,7 +838,7 @@
 	omap_uart_block_sleep(uart);
 	uart->timeout = DEFAULT_TIMEOUT;
 
-	release_console_sem();
+	console_unlock();
 
 	if ((cpu_is_omap34xx() && uart->padconf) ||
 	    (uart->wk_en && uart->wk_mask)) {
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index 77ecebf..c37e823 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -780,8 +780,7 @@
 	struct omap_sr *sr_info = (struct omap_sr *) data;
 
 	if (!sr_info) {
-		pr_warning("%s: omap_sr struct for sr_%s not found\n",
-			__func__, sr_info->voltdm->name);
+		pr_warning("%s: omap_sr struct not found\n", __func__);
 		return -EINVAL;
 	}
 
@@ -795,8 +794,7 @@
 	struct omap_sr *sr_info = (struct omap_sr *) data;
 
 	if (!sr_info) {
-		pr_warning("%s: omap_sr struct for sr_%s not found\n",
-			__func__, sr_info->voltdm->name);
+		pr_warning("%s: omap_sr struct not found\n", __func__);
 		return -EINVAL;
 	}
 
@@ -834,7 +832,8 @@
 
 	if (!pdata) {
 		dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_free_devinfo;
 	}
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -966,7 +965,7 @@
 	}
 
 	sr_info = _sr_lookup(pdata->voltdm);
-	if (!sr_info) {
+	if (IS_ERR(sr_info)) {
 		dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
 			__func__);
 		return -EINVAL;
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 4e48e78..7b7c268 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -42,6 +42,8 @@
 
 #include "timer-gp.h"
 
+#include <plat/common.h>
+
 /* MAX_GPTIMER_ID: number of GPTIMERs on the chip */
 #define MAX_GPTIMER_ID		12
 
@@ -176,10 +178,14 @@
 /* 
  * When 32k-timer is enabled, don't use GPTimer for clocksource
  * instead, just leave default clocksource which uses the 32k
- * sync counter.  See clocksource setup in see plat-omap/common.c. 
+ * sync counter.  See clocksource setup in plat-omap/counter_32k.c
  */
 
-static inline void __init omap2_gp_clocksource_init(void) {}
+static void __init omap2_gp_clocksource_init(void)
+{
+	omap_init_clocksource_32k();
+}
+
 #else
 /*
  * clocksource
diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c
index ed6079c..12be525 100644
--- a/arch/arm/mach-omap2/voltage.c
+++ b/arch/arm/mach-omap2/voltage.c
@@ -471,6 +471,7 @@
 	strcat(name, vdd->voltdm.name);
 
 	vdd->debug_dir = debugfs_create_dir(name, voltage_dir);
+	kfree(name);
 	if (IS_ERR(vdd->debug_dir)) {
 		pr_warning("%s: Unable to create debugfs directory for"
 			" vdd_%s\n", __func__, vdd->voltdm.name);
diff --git a/arch/arm/mach-pxa/colibri-evalboard.c b/arch/arm/mach-pxa/colibri-evalboard.c
index 6b2c800..28f667e 100644
--- a/arch/arm/mach-pxa/colibri-evalboard.c
+++ b/arch/arm/mach-pxa/colibri-evalboard.c
@@ -50,7 +50,7 @@
 			GPIO0_COLIBRI_PXA270_SD_DETECT;
 	if (machine_is_colibri300())	/* PXA300 Colibri */
 		colibri_mci_platform_data.gpio_card_detect =
-			GPIO39_COLIBRI_PXA300_SD_DETECT;
+			GPIO13_COLIBRI_PXA300_SD_DETECT;
 	else				/* PXA320 Colibri */
 		colibri_mci_platform_data.gpio_card_detect =
 			GPIO28_COLIBRI_PXA320_SD_DETECT;
diff --git a/arch/arm/mach-pxa/colibri-pxa300.c b/arch/arm/mach-pxa/colibri-pxa300.c
index fddb16d..66dd81c 100644
--- a/arch/arm/mach-pxa/colibri-pxa300.c
+++ b/arch/arm/mach-pxa/colibri-pxa300.c
@@ -41,7 +41,7 @@
 	GPIO4_MMC1_DAT1,
 	GPIO5_MMC1_DAT2,
 	GPIO6_MMC1_DAT3,
-	GPIO39_GPIO,	/* SD detect */
+	GPIO13_GPIO,	/* GPIO13_COLIBRI_PXA300_SD_DETECT */
 
 	/* UHC */
 	GPIO0_2_USBH_PEN,
diff --git a/arch/arm/mach-pxa/include/mach/colibri.h b/arch/arm/mach-pxa/include/mach/colibri.h
index 388a96f..cb4236e 100644
--- a/arch/arm/mach-pxa/include/mach/colibri.h
+++ b/arch/arm/mach-pxa/include/mach/colibri.h
@@ -60,7 +60,7 @@
 #define GPIO113_COLIBRI_PXA270_TS_IRQ	113
 
 /* GPIO definitions for Colibri PXA300/310 */
-#define GPIO39_COLIBRI_PXA300_SD_DETECT	39
+#define GPIO13_COLIBRI_PXA300_SD_DETECT	13
 
 /* GPIO definitions for Colibri PXA320 */
 #define GPIO28_COLIBRI_PXA320_SD_DETECT	28
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 405b92a..35572c4 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -323,7 +323,7 @@
 	.pwm_id		= 0,
 	.max_brightness	= 0xfe,
 	.dft_brightness	= 0x7e,
-	.pwm_period_ns	= 3500,
+	.pwm_period_ns	= 3500 * 1024,
 	.init		= palm27x_backlight_init,
 	.notify		= palm27x_backlight_notify,
 	.exit		= palm27x_backlight_exit,
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index 978e1b2..1807c9a 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -33,7 +33,7 @@
 #endif
 
 	/* skip registers saving for standby */
-	if (state != PM_SUSPEND_STANDBY) {
+	if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->save) {
 		pxa_cpu_pm_fns->save(sleep_save);
 		/* before sleeping, calculate and save a checksum */
 		for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++)
@@ -44,7 +44,7 @@
 	pxa_cpu_pm_fns->enter(state);
 	cpu_init();
 
-	if (state != PM_SUSPEND_STANDBY) {
+	if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) {
 		/* after sleeping, validate the checksum */
 		for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++)
 			checksum += sleep_save[i];
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index b4575ae..7ca138a 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -2,52 +2,56 @@
 	depends on ARCH_REALVIEW
 
 config MACH_REALVIEW_EB
-	bool "Support RealView/EB platform"
+	bool "Support RealView(R) Emulation Baseboard"
 	select ARM_GIC
 	help
-	  Include support for the ARM(R) RealView Emulation Baseboard platform.
+	  Include support for the ARM(R) RealView(R) Emulation Baseboard
+	  platform.
 
 config REALVIEW_EB_A9MP
-	bool "Support Multicore Cortex-A9"
+	bool "Support Multicore Cortex-A9 Tile"
 	depends on MACH_REALVIEW_EB
 	select CPU_V7
 	help
-	  Enable support for the Cortex-A9MPCore tile on the Realview platform.
+	  Enable support for the Cortex-A9MPCore tile fitted to the
+	  RealView(R) Emulation Baseboard platform.
 
 config REALVIEW_EB_ARM11MP
-	bool "Support ARM11MPCore tile"
+	bool "Support ARM11MPCore Tile"
 	depends on MACH_REALVIEW_EB
 	select CPU_V6
 	select ARCH_HAS_BARRIERS if SMP
 	help
-	  Enable support for the ARM11MPCore tile on the Realview platform.
+	  Enable support for the ARM11MPCore tile fitted to the RealView(R)
+	  Emulation Baseboard platform.
 
 config REALVIEW_EB_ARM11MP_REVB
-	bool "Support ARM11MPCore RevB tile"
+	bool "Support ARM11MPCore RevB Tile"
 	depends on REALVIEW_EB_ARM11MP
 	help
-	  Enable support for the ARM11MPCore RevB tile on the Realview
-	  platform. Since there are device address differences, a
-	  kernel built with this option enabled is not compatible with
-	  other revisions of the ARM11MPCore tile.
+	  Enable support for the ARM11MPCore Revision B tile on the
+	  RealView(R) Emulation Baseboard platform. Since there are device
+	  address differences, a kernel built with this option enabled is
+	  not compatible with other revisions of the ARM11MPCore tile.
 
 config MACH_REALVIEW_PB11MP
-	bool "Support RealView/PB11MPCore platform"
+	bool "Support RealView(R) Platform Baseboard for ARM11MPCore"
 	select CPU_V6
 	select ARM_GIC
 	select HAVE_PATA_PLATFORM
 	select ARCH_HAS_BARRIERS if SMP
 	help
-	  Include support for the ARM(R) RealView MPCore Platform Baseboard.
-	  PB11MPCore is a platform with an on-board ARM11MPCore and has
+	  Include support for the ARM(R) RealView(R) Platform Baseboard for
+	  the ARM11MPCore.  This platform has an on-board ARM11MPCore and has
 	  support for PCI-E and Compact Flash.
 
 config MACH_REALVIEW_PB1176
-	bool "Support RealView/PB1176 platform"
+	bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S"
 	select CPU_V6
 	select ARM_GIC
 	help
-	  Include support for the ARM(R) RealView ARM1176 Platform Baseboard.
+	  Include support for the ARM(R) RealView(R) Platform Baseboard for
+	  ARM1176JZF-S.
 
 config REALVIEW_PB1176_SECURE_FLASH
 	bool "Allow access to the secure flash memory block"
@@ -59,23 +63,24 @@
 	  block (64MB @ 0x3c000000) is required.
 
 config MACH_REALVIEW_PBA8
-	bool "Support RealView/PB-A8 platform"
+	bool "Support RealView(R) Platform Baseboard for Cortex(tm)-A8 platform"
 	select CPU_V7
 	select ARM_GIC
 	select HAVE_PATA_PLATFORM
 	help
-	  Include support for the ARM(R) RealView Cortex-A8 Platform Baseboard.
-	  PB-A8 is a platform with an on-board Cortex-A8 and has support for
-	  PCI-E and Compact Flash.
+	  Include support for the ARM(R) RealView(R) Platform Baseboard for
+	  Cortex(tm)-A8.  This platform has an on-board Cortex-A8 and has
+	  support for PCI-E and Compact Flash.
 
 config MACH_REALVIEW_PBX
-	bool "Support RealView/PBX platform"
+	bool "Support RealView(R) Platform Baseboard Explore"
 	select ARM_GIC
 	select HAVE_PATA_PLATFORM
 	select ARCH_SPARSEMEM_ENABLE if CPU_V7 && !REALVIEW_HIGH_PHYS_OFFSET
 	select ZONE_DMA if SPARSEMEM
 	help
-	  Include support for the ARM(R) RealView PBX platform.
+	  Include support for the ARM(R) RealView(R) Platform Baseboard
+	  Explore.
 
 config REALVIEW_HIGH_PHYS_OFFSET
 	bool "High physical base address for the RealView platform"
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
index a22bf67..6959d13 100644
--- a/arch/arm/mach-realview/platsmp.c
+++ b/arch/arm/mach-realview/platsmp.c
@@ -41,7 +41,7 @@
  * observers, irrespective of whether they're taking part in coherency
  * or not.  This is necessary for the hotplug code to work reliably.
  */
-static void write_pen_release(int val)
+static void __cpuinit write_pen_release(int val)
 {
 	pen_release = val;
 	smp_wmb();
diff --git a/arch/arm/mach-s5pv310/Kconfig b/arch/arm/mach-s5pv310/Kconfig
index 09c4c21..b2a9acc 100644
--- a/arch/arm/mach-s5pv310/Kconfig
+++ b/arch/arm/mach-s5pv310/Kconfig
@@ -122,6 +122,7 @@
 	select S3C_DEV_HSMMC2
 	select S3C_DEV_HSMMC3
 	select S5PV310_DEV_PD
+	select S5PV310_DEV_SYSMMU
 	select S5PV310_SETUP_I2C1
 	select S5PV310_SETUP_SDHCI
 	help
diff --git a/arch/arm/mach-s5pv310/include/mach/map.h b/arch/arm/mach-s5pv310/include/mach/map.h
index 74d4006..3060f78 100644
--- a/arch/arm/mach-s5pv310/include/mach/map.h
+++ b/arch/arm/mach-s5pv310/include/mach/map.h
@@ -124,8 +124,6 @@
 #define S5PV310_PA_SYSMMU_TV		0x12E20000
 #define S5PV310_PA_SYSMMU_MFC_L		0x13620000
 #define S5PV310_PA_SYSMMU_MFC_R		0x13630000
-#define S5PV310_SYSMMU_TOTAL_IPNUM	16
-#define S5P_SYSMMU_TOTAL_IPNUM		S5PV310_SYSMMU_TOTAL_IPNUM
 
 /* compatibiltiy defines. */
 #define S3C_PA_UART			S5PV310_PA_UART
diff --git a/arch/arm/mach-s5pv310/include/mach/sysmmu.h b/arch/arm/mach-s5pv310/include/mach/sysmmu.h
index 662fe85..598fc5c 100644
--- a/arch/arm/mach-s5pv310/include/mach/sysmmu.h
+++ b/arch/arm/mach-s5pv310/include/mach/sysmmu.h
@@ -13,6 +13,9 @@
 #ifndef __ASM_ARM_ARCH_SYSMMU_H
 #define __ASM_ARM_ARCH_SYSMMU_H __FILE__
 
+#define S5PV310_SYSMMU_TOTAL_IPNUM	16
+#define S5P_SYSMMU_TOTAL_IPNUM		S5PV310_SYSMMU_TOTAL_IPNUM
+
 enum s5pv310_sysmmu_ips {
 	SYSMMU_MDMA,
 	SYSMMU_SSS,
@@ -32,7 +35,7 @@
 	SYSMMU_MFC_R,
 };
 
-static char *sysmmu_ips_name[S5P_SYSMMU_TOTAL_IPNUM] = {
+static char *sysmmu_ips_name[S5PV310_SYSMMU_TOTAL_IPNUM] = {
 	"SYSMMU_MDMA"	,
 	"SYSMMU_SSS"	,
 	"SYSMMU_FIMC0"	,
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index d43c5ef..bd3e1bf 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -241,6 +241,9 @@
 struct platform_device collie_locomo_device = {
 	.name		= "locomo",
 	.id		= 0,
+	.dev		= {
+		.platform_data	= &locomo_info,
+	},
 	.num_resources	= ARRAY_SIZE(locomo_resources),
 	.resource	= locomo_resources,
 };
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 4d1b4c5..0c8f6cf 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -60,6 +60,8 @@
 
 config MACH_AG5EVM
 	bool "AG5EVM board"
+	select ARCH_REQUIRE_GPIOLIB
+	select SH_LCD_MIPI_DSI
 	depends on ARCH_SH73A0
 
 config MACH_MACKEREL
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index c18a740..2123b96 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -34,9 +34,10 @@
 #include <linux/input/sh_keysc.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/sh_mmcif.h>
-
+#include <linux/sh_clk.h>
+#include <video/sh_mobile_lcdc.h>
+#include <video/sh_mipi_dsi.h>
 #include <sound/sh_fsi.h>
-
 #include <mach/hardware.h>
 #include <mach/sh73a0.h>
 #include <mach/common.h>
@@ -183,11 +184,165 @@
 	.resource	= sh_mmcif_resources,
 };
 
+/* IrDA */
+static struct resource irda_resources[] = {
+	[0] = {
+		.start	= 0xE6D00000,
+		.end	= 0xE6D01FD4 - 1,
+		.flags  = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= gic_spi(95),
+		.flags  = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device irda_device = {
+	.name           = "sh_irda",
+	.id		= 0,
+	.resource       = irda_resources,
+	.num_resources  = ARRAY_SIZE(irda_resources),
+};
+
+static unsigned char lcd_backlight_seq[3][2] = {
+	{ 0x04, 0x07 },
+	{ 0x23, 0x80 },
+	{ 0x03, 0x01 },
+};
+
+static void lcd_backlight_on(void)
+{
+	struct i2c_adapter *a;
+	struct i2c_msg msg;
+	int k;
+
+	a = i2c_get_adapter(1);
+	for (k = 0; a && k < 3; k++) {
+		msg.addr = 0x6d;
+		msg.buf = &lcd_backlight_seq[k][0];
+		msg.len = 2;
+		msg.flags = 0;
+		if (i2c_transfer(a, &msg, 1) != 1)
+			break;
+	}
+}
+
+static void lcd_backlight_reset(void)
+{
+	gpio_set_value(GPIO_PORT235, 0);
+	mdelay(24);
+	gpio_set_value(GPIO_PORT235, 1);
+}
+
+static void lcd_on(void *board_data, struct fb_info *info)
+{
+	lcd_backlight_on();
+}
+
+static void lcd_off(void *board_data)
+{
+	lcd_backlight_reset();
+}
+
+/* LCDC0 */
+static const struct fb_videomode lcdc0_modes[] = {
+	{
+		.name		= "R63302(QHD)",
+		.xres		= 544,
+		.yres		= 961,
+		.left_margin	= 72,
+		.right_margin	= 600,
+		.hsync_len	= 16,
+		.upper_margin	= 8,
+		.lower_margin	= 8,
+		.vsync_len	= 2,
+		.sync		= FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT,
+	},
+};
+
+static struct sh_mobile_lcdc_info lcdc0_info = {
+	.clock_source = LCDC_CLK_PERIPHERAL,
+	.ch[0] = {
+		.chan = LCDC_CHAN_MAINLCD,
+		.interface_type = RGB24,
+		.clock_divider = 1,
+		.flags = LCDC_FLAGS_DWPOL,
+		.lcd_size_cfg.width = 44,
+		.lcd_size_cfg.height = 79,
+		.bpp = 16,
+		.lcd_cfg = lcdc0_modes,
+		.num_cfg = ARRAY_SIZE(lcdc0_modes),
+		.board_cfg = {
+			.display_on = lcd_on,
+			.display_off = lcd_off,
+		},
+	}
+};
+
+static struct resource lcdc0_resources[] = {
+	[0] = {
+		.name	= "LCDC0",
+		.start	= 0xfe940000, /* P4-only space */
+		.end	= 0xfe943fff,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= intcs_evt2irq(0x580),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device lcdc0_device = {
+	.name		= "sh_mobile_lcdc_fb",
+	.num_resources	= ARRAY_SIZE(lcdc0_resources),
+	.resource	= lcdc0_resources,
+	.id             = 0,
+	.dev	= {
+		.platform_data	= &lcdc0_info,
+		.coherent_dma_mask = ~0,
+	},
+};
+
+/* MIPI-DSI */
+static struct resource mipidsi0_resources[] = {
+	[0] = {
+		.start  = 0xfeab0000,
+		.end    = 0xfeab3fff,
+		.flags  = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start  = 0xfeab4000,
+		.end    = 0xfeab7fff,
+		.flags  = IORESOURCE_MEM,
+	},
+};
+
+static struct sh_mipi_dsi_info mipidsi0_info = {
+	.data_format	= MIPI_RGB888,
+	.lcd_chan	= &lcdc0_info.ch[0],
+	.vsynw_offset	= 20,
+	.clksrc		= 1,
+	.flags		= SH_MIPI_DSI_HSABM,
+};
+
+static struct platform_device mipidsi0_device = {
+	.name           = "sh-mipi-dsi",
+	.num_resources  = ARRAY_SIZE(mipidsi0_resources),
+	.resource       = mipidsi0_resources,
+	.id             = 0,
+	.dev	= {
+		.platform_data	= &mipidsi0_info,
+	},
+};
+
 static struct platform_device *ag5evm_devices[] __initdata = {
 	&eth_device,
 	&keysc_device,
 	&fsi_device,
 	&mmc_device,
+	&irda_device,
+	&lcdc0_device,
+	&mipidsi0_device,
 };
 
 static struct map_desc ag5evm_io_desc[] __initdata = {
@@ -224,6 +379,8 @@
 	__raw_writew(__raw_readw(PINTCR0A) | (2<<10), PINTCR0A);
 }
 
+#define DSI0PHYCR	0xe615006c
+
 static void __init ag5evm_init(void)
 {
 	sh73a0_pinmux_init();
@@ -287,6 +444,25 @@
 	gpio_request(GPIO_FN_FSIAISLD, NULL);
 	gpio_request(GPIO_FN_FSIAOSLD, NULL);
 
+	/* IrDA */
+	gpio_request(GPIO_FN_PORT241_IRDA_OUT, NULL);
+	gpio_request(GPIO_FN_PORT242_IRDA_IN,  NULL);
+	gpio_request(GPIO_FN_PORT243_IRDA_FIRSEL, NULL);
+
+	/* LCD panel */
+	gpio_request(GPIO_PORT217, NULL); /* RESET */
+	gpio_direction_output(GPIO_PORT217, 0);
+	mdelay(1);
+	gpio_set_value(GPIO_PORT217, 1);
+
+	/* LCD backlight controller */
+	gpio_request(GPIO_PORT235, NULL); /* RESET */
+	gpio_direction_output(GPIO_PORT235, 0);
+	lcd_backlight_reset();
+
+	/* MIPI-DSI clock setup */
+	__raw_writel(0x2a809010, DSI0PHYCR);
+
 #ifdef CONFIG_CACHE_L2X0
 	/* Shared attribute override enable, 64K*8way */
 	l2x0_init(__io(0xf0100000), 0x00460000, 0xc2000fff);
diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c
index 686b304..ef4613b 100644
--- a/arch/arm/mach-shmobile/board-g3evm.c
+++ b/arch/arm/mach-shmobile/board-g3evm.c
@@ -347,7 +347,6 @@
 	gpio_request(GPIO_FN_IRDA_OUT, NULL);
 	gpio_request(GPIO_FN_IRDA_IN, NULL);
 	gpio_request(GPIO_FN_IRDA_FIRSEL, NULL);
-	set_irq_type(evt2irq(0x480), IRQ_TYPE_LEVEL_LOW);
 
 	sh7367_add_standard_devices();
 
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 7b15d21..fb4213a 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -169,9 +169,8 @@
  *	SW1	|	SW33
  *		| bit1 | bit2 | bit3 | bit4
  * -------------+------+------+------+-------
- * MMC0	  OFF	|  OFF |  ON  |  ON  |  X
- * MMC1	  ON	|  OFF |  ON  |  X   | ON
- * SDHI1  OFF	|  ON  |   X  |  OFF | ON
+ * MMC0   OFF	|  OFF |   X  |  ON  |  X       (Use MMCIF)
+ * SDHI1  OFF	|  ON  |   X  |  OFF |  X       (Use MFD_SH_MOBILE_SDHI)
  *
  */
 
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 9aa8d68..e9731b5 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -234,7 +234,9 @@
 
 	value = __raw_readl(PLLC2CR) & ~(0x3f << 24);
 
-	__raw_writel((value & ~0x80000000) | ((idx + 19) << 24), PLLC2CR);
+	__raw_writel(value | ((idx + 19) << 24), PLLC2CR);
+
+	clk->rate = clk->freq_table[idx].frequency;
 
 	return 0;
 }
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 720a714..ddd4a1b 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -118,8 +118,16 @@
 {
 	unsigned long mult = 1;
 
-	if (__raw_readl(PLLECR) & (1 << clk->enable_bit))
+	if (__raw_readl(PLLECR) & (1 << clk->enable_bit)) {
 		mult = (((__raw_readl(clk->enable_reg) >> 24) & 0x3f) + 1);
+		/* handle CFG bit for PLL1 and PLL2 */
+		switch (clk->enable_bit) {
+		case 1:
+		case 2:
+			if (__raw_readl(clk->enable_reg) & (1 << 20))
+				mult *= 2;
+		}
+	}
 
 	return clk->parent->rate * mult;
 }
@@ -212,7 +220,7 @@
 static struct clk div4_clks[DIV4_NR] = {
 	[DIV4_I] = DIV4(FRQCRA, 20, 0xfff, CLK_ENABLE_ON_INIT),
 	[DIV4_ZG] = DIV4(FRQCRA, 16, 0xbff, CLK_ENABLE_ON_INIT),
-	[DIV4_M3] = DIV4(FRQCRA, 8, 0xfff, CLK_ENABLE_ON_INIT),
+	[DIV4_M3] = DIV4(FRQCRA, 12, 0xfff, CLK_ENABLE_ON_INIT),
 	[DIV4_B] = DIV4(FRQCRA, 8, 0xfff, CLK_ENABLE_ON_INIT),
 	[DIV4_M1] = DIV4(FRQCRA, 4, 0xfff, 0),
 	[DIV4_M2] = DIV4(FRQCRA, 0, 0xfff, 0),
@@ -255,10 +263,10 @@
 };
 
 enum { MSTP001,
-	MSTP125, MSTP116,
+	MSTP125, MSTP118, MSTP116, MSTP100,
 	MSTP219,
 	MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
-	MSTP331, MSTP329, MSTP323, MSTP312,
+	MSTP331, MSTP329, MSTP325, MSTP323, MSTP312,
 	MSTP411, MSTP410, MSTP403,
 	MSTP_NR };
 
@@ -268,7 +276,9 @@
 static struct clk mstp_clks[MSTP_NR] = {
 	[MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */
 	[MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
+	[MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */
 	[MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
+	[MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
 	[MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */
 	[MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
 	[MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
@@ -279,6 +289,7 @@
 	[MSTP200] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 0, 0), /* SCIFA4 */
 	[MSTP331] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 31, 0), /* SCIFA6 */
 	[MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
+	[MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */
 	[MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
 	[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
 	[MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
@@ -288,16 +299,25 @@
 
 #define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk }
 #define CLKDEV_DEV_ID(_id, _clk) { .dev_id = _id, .clk = _clk }
+#define CLKDEV_ICK_ID(_cid, _did, _clk) { .con_id = _cid, .dev_id = _did, .clk = _clk }
 
 static struct clk_lookup lookups[] = {
 	/* main clocks */
 	CLKDEV_CON_ID("r_clk", &r_clk),
 
+	/* DIV6 clocks */
+	CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
+	CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
+	CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
+	CLKDEV_ICK_ID("dsi1p_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSI1P]),
+
 	/* MSTP32 clocks */
 	CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */
+	CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
 	CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */
 	CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */
 	CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
+	CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
 	CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
 	CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
 	CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
@@ -308,6 +328,7 @@
 	CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[MSTP200]), /* SCIFA4 */
 	CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP331]), /* SCIFA6 */
 	CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
+	CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */
 	CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */
 	CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
 	CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
diff --git a/arch/arm/mach-shmobile/intc-sh7372.c b/arch/arm/mach-shmobile/intc-sh7372.c
index f78a1ea..ca5f9d1 100644
--- a/arch/arm/mach-shmobile/intc-sh7372.c
+++ b/arch/arm/mach-shmobile/intc-sh7372.c
@@ -365,6 +365,7 @@
 
 enum {
 	UNUSED_INTCS = 0,
+	ENABLED_INTCS,
 
 	INTCS,
 
@@ -413,7 +414,7 @@
 	CMT4,
 	DSITX1_DSITX1_0,
 	DSITX1_DSITX1_1,
-	/* MFIS2 */
+	MFIS2_INTCS, /* Priority always enabled using ENABLED_INTCS */
 	CPORTS2R,
 	/* CEC */
 	JPU6E,
@@ -477,7 +478,7 @@
 	INTCS_VECT(CMT4, 0x1980),
 	INTCS_VECT(DSITX1_DSITX1_0, 0x19a0),
 	INTCS_VECT(DSITX1_DSITX1_1, 0x19c0),
-	/* MFIS2 */
+	INTCS_VECT(MFIS2_INTCS, 0x1a00),
 	INTCS_VECT(CPORTS2R, 0x1a20),
 	/* CEC */
 	INTCS_VECT(JPU6E, 0x1a80),
@@ -543,7 +544,7 @@
 	  { 0, TMU1_TUNI2, TMU1_TUNI1, TMU1_TUNI0,
 	    CMT4, DSITX1_DSITX1_0, DSITX1_DSITX1_1, 0 } },
 	{ 0xffd5019c, 0xffd501dc, 8, /* IMR7SA3 / IMCR7SA3 */
-	  { 0, CPORTS2R, 0, 0,
+	  { MFIS2_INTCS, CPORTS2R, 0, 0,
 	    JPU6E, 0, 0, 0 } },
 	{ 0xffd20104, 0, 16, /* INTAMASK */
 	  { 0, 0, 0, 0, 0, 0, 0, 0,
@@ -571,7 +572,8 @@
 	{ 0xffd50030, 0, 16, 4, /* IPRMS3 */ { TMU1, 0, 0, 0 } },
 	{ 0xffd50034, 0, 16, 4, /* IPRNS3 */ { CMT4, DSITX1_DSITX1_0,
 					       DSITX1_DSITX1_1, 0 } },
-	{ 0xffd50038, 0, 16, 4, /* IPROS3 */ { 0, CPORTS2R, 0, 0 } },
+	{ 0xffd50038, 0, 16, 4, /* IPROS3 */ { ENABLED_INTCS, CPORTS2R,
+					       0, 0 } },
 	{ 0xffd5003c, 0, 16, 4, /* IPRPS3 */ { JPU6E, 0, 0, 0 } },
 };
 
@@ -590,6 +592,7 @@
 
 static struct intc_desc intcs_desc __initdata = {
 	.name = "sh7372-intcs",
+	.force_enable = ENABLED_INTCS,
 	.resource = intcs_resources,
 	.num_resources = ARRAY_SIZE(intcs_resources),
 	.hw = INTC_HW_DESC(intcs_vectors, intcs_groups, intcs_mask_registers,
diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c
index 322d8d5..5d0e150 100644
--- a/arch/arm/mach-shmobile/intc-sh73a0.c
+++ b/arch/arm/mach-shmobile/intc-sh73a0.c
@@ -252,10 +252,11 @@
 
 void __init sh73a0_init_irq(void)
 {
-	void __iomem *gic_base = __io(0xf0001000);
+	void __iomem *gic_dist_base = __io(0xf0001000);
+	void __iomem *gic_cpu_base = __io(0xf0000100);
 	void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
 
-	gic_init(0, 29, gic_base, gic_base);
+	gic_init(0, 29, gic_dist_base, gic_cpu_base);
 
 	register_intc_controller(&intcs_desc);
 
diff --git a/arch/arm/mach-tegra/gpio.c b/arch/arm/mach-tegra/gpio.c
index bd06620..ad80488 100644
--- a/arch/arm/mach-tegra/gpio.c
+++ b/arch/arm/mach-tegra/gpio.c
@@ -207,9 +207,9 @@
 	spin_unlock_irqrestore(&bank->lvl_lock[port], flags);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-		__set_irq_handler_unlocked(irq, handle_level_irq);
+		__set_irq_handler_unlocked(d->irq, handle_level_irq);
 	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
-		__set_irq_handler_unlocked(irq, handle_edge_irq);
+		__set_irq_handler_unlocked(d->irq, handle_edge_irq);
 
 	return 0;
 }
diff --git a/arch/arm/mach-tegra/include/mach/clk.h b/arch/arm/mach-tegra/include/mach/clk.h
index d772395..a217f68 100644
--- a/arch/arm/mach-tegra/include/mach/clk.h
+++ b/arch/arm/mach-tegra/include/mach/clk.h
@@ -20,6 +20,8 @@
 #ifndef __MACH_CLK_H
 #define __MACH_CLK_H
 
+struct clk;
+
 void tegra_periph_reset_deassert(struct clk *c);
 void tegra_periph_reset_assert(struct clk *c);
 
diff --git a/arch/arm/mach-tegra/include/mach/clkdev.h b/arch/arm/mach-tegra/include/mach/clkdev.h
index 412f5c6..66cd3f4 100644
--- a/arch/arm/mach-tegra/include/mach/clkdev.h
+++ b/arch/arm/mach-tegra/include/mach/clkdev.h
@@ -20,6 +20,8 @@
 #ifndef __MACH_CLKDEV_H
 #define __MACH_CLKDEV_H
 
+struct clk;
+
 static inline int __clk_get(struct clk *clk)
 {
 	return 1;
diff --git a/arch/arm/mach-tegra/include/mach/kbc.h b/arch/arm/mach-tegra/include/mach/kbc.h
new file mode 100644
index 0000000..66ad276
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/kbc.h
@@ -0,0 +1,61 @@
+/*
+ * Platform definitions for tegra-kbc keyboard input driver
+ *
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef ASMARM_ARCH_TEGRA_KBC_H
+#define ASMARM_ARCH_TEGRA_KBC_H
+
+#include <linux/types.h>
+#include <linux/input/matrix_keypad.h>
+
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+#define KBC_MAX_GPIO	24
+#define KBC_MAX_KPENT	8
+#else
+#define KBC_MAX_GPIO	20
+#define KBC_MAX_KPENT	7
+#endif
+
+#define KBC_MAX_ROW	16
+#define KBC_MAX_COL	8
+#define KBC_MAX_KEY	(KBC_MAX_ROW * KBC_MAX_COL)
+
+struct tegra_kbc_pin_cfg {
+	bool is_row;
+	unsigned char num;
+};
+
+struct tegra_kbc_wake_key {
+	u8 row:4;
+	u8 col:4;
+};
+
+struct tegra_kbc_platform_data {
+	unsigned int debounce_cnt;
+	unsigned int repeat_cnt;
+
+	unsigned int wake_cnt; /* 0:wake on any key >1:wake on wake_cfg */
+	const struct tegra_kbc_wake_key *wake_cfg;
+
+	struct tegra_kbc_pin_cfg pin_cfg[KBC_MAX_GPIO];
+	const struct matrix_keymap_data *keymap_data;
+
+	bool wakeup;
+};
+#endif
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
index de7dfad..17c74d2 100644
--- a/arch/arm/mach-tegra/irq.c
+++ b/arch/arm/mach-tegra/irq.c
@@ -46,24 +46,24 @@
 #define ICTLR_COP_IER_CLR	0x38
 #define ICTLR_COP_IEP_CLASS	0x3c
 
-static void (*gic_mask_irq)(struct irq_data *d);
-static void (*gic_unmask_irq)(struct irq_data *d);
+static void (*tegra_gic_mask_irq)(struct irq_data *d);
+static void (*tegra_gic_unmask_irq)(struct irq_data *d);
 
-#define irq_to_ictlr(irq) (((irq)-32) >> 5)
+#define irq_to_ictlr(irq) (((irq) - 32) >> 5)
 static void __iomem *tegra_ictlr_base = IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE);
-#define ictlr_to_virt(ictlr) (tegra_ictlr_base + (ictlr)*0x100)
+#define ictlr_to_virt(ictlr) (tegra_ictlr_base + (ictlr) * 0x100)
 
 static void tegra_mask(struct irq_data *d)
 {
 	void __iomem *addr = ictlr_to_virt(irq_to_ictlr(d->irq));
-	gic_mask_irq(d);
-	writel(1<<(d->irq&31), addr+ICTLR_CPU_IER_CLR);
+	tegra_gic_mask_irq(d);
+	writel(1 << (d->irq & 31), addr+ICTLR_CPU_IER_CLR);
 }
 
 static void tegra_unmask(struct irq_data *d)
 {
 	void __iomem *addr = ictlr_to_virt(irq_to_ictlr(d->irq));
-	gic_unmask_irq(d);
+	tegra_gic_unmask_irq(d);
 	writel(1<<(d->irq&31), addr+ICTLR_CPU_IER_SET);
 }
 
@@ -98,8 +98,8 @@
 		 IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100));
 
 	gic = get_irq_chip(29);
-	gic_unmask_irq = gic->irq_unmask;
-	gic_mask_irq = gic->irq_mask;
+	tegra_gic_unmask_irq = gic->irq_unmask;
+	tegra_gic_mask_irq = gic->irq_mask;
 	tegra_irq.irq_ack = gic->irq_ack;
 #ifdef CONFIG_SMP
 	tegra_irq.irq_set_affinity = gic->irq_set_affinity;
diff --git a/arch/arm/mach-versatile/Kconfig b/arch/arm/mach-versatile/Kconfig
index 3f7b5e9..9cdec5a 100644
--- a/arch/arm/mach-versatile/Kconfig
+++ b/arch/arm/mach-versatile/Kconfig
@@ -2,17 +2,19 @@
 	depends on ARCH_VERSATILE
 
 config ARCH_VERSATILE_PB
-	bool "Support Versatile/PB platform"
+	bool "Support Versatile Platform Baseboard for ARM926EJ-S"
 	select CPU_ARM926T
 	select MIGHT_HAVE_PCI
 	default y
 	help
-	  Include support for the ARM(R) Versatile/PB platform.
+	  Include support for the ARM(R) Versatile Platform Baseboard
+	  for the ARM926EJ-S.
 
 config MACH_VERSATILE_AB
-	bool "Support Versatile/AB platform"
+	bool "Support Versatile Application Baseboard for ARM926EJ-S"
 	select CPU_ARM926T
 	help
-	  Include support for the ARM(R) Versatile/AP platform.
+	  Include support for the ARM(R) Versatile Application Baseboard
+	  for the ARM926EJ-S.
 
 endmenu
diff --git a/arch/arm/mach-vexpress/platsmp.c b/arch/arm/mach-vexpress/platsmp.c
index b1687b6..634bf1d 100644
--- a/arch/arm/mach-vexpress/platsmp.c
+++ b/arch/arm/mach-vexpress/platsmp.c
@@ -39,7 +39,7 @@
  * observers, irrespective of whether they're taking part in coherency
  * or not.  This is necessary for the hotplug code to work reliably.
  */
-static void write_pen_release(int val)
+static void __cpuinit write_pen_release(int val)
 {
 	pen_release = val;
 	smp_wmb();
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index a9ed342..1edae65 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -19,6 +19,7 @@
 #include <asm/mach/time.h>
 #include <asm/hardware/arm_timer.h>
 #include <asm/hardware/timer-sp.h>
+#include <asm/hardware/sp810.h>
 
 #include <mach/motherboard.h>
 
@@ -50,8 +51,16 @@
 
 static void __init v2m_timer_init(void)
 {
+	u32 scctrl;
+
 	versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
 
+	/* Select 1MHz TIMCLK as the reference clock for SP804 timers */
+	scctrl = readl(MMIO_P2V(V2M_SYSCTL + SCCTRL));
+	scctrl |= SCCTRL_TIMEREN0SEL_TIMCLK;
+	scctrl |= SCCTRL_TIMEREN1SEL_TIMCLK;
+	writel(scctrl, MMIO_P2V(V2M_SYSCTL + SCCTRL));
+
 	writel(0, MMIO_P2V(V2M_TIMER0) + TIMER_CTRL);
 	writel(0, MMIO_P2V(V2M_TIMER1) + TIMER_CTRL);
 
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 9d30c6f..e4509ba 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -405,7 +405,7 @@
 config CPU_32v6K
 	bool "Support ARM V6K processor extensions" if !SMP
 	depends on CPU_V6 || CPU_V7
-	default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
+	default y if SMP
 	help
 	  Say Y here if your ARMv6 processor supports the 'K' extension.
 	  This enables the kernel to use some instructions not present
@@ -416,7 +416,7 @@
 # ARMv7
 config CPU_V7
 	bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
-	select CPU_32v6K if !ARCH_OMAP2
+	select CPU_32v6K
 	select CPU_32v7
 	select CPU_ABRT_EV7
 	select CPU_PABRT_V7
@@ -644,7 +644,7 @@
 
 config SWP_EMULATE
 	bool "Emulate SWP/SWPB instructions"
-	depends on CPU_V7 && !CPU_V6
+	depends on !CPU_USE_DOMAINS && CPU_V7 && !CPU_V6
 	select HAVE_PROC_CPU if PROC_FS
 	default y if SMP
 	help
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 5164069..cddd684 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -297,6 +297,12 @@
 	memblock_reserve(__pa(_stext), _end - _stext);
 #endif
 #ifdef CONFIG_BLK_DEV_INITRD
+	if (phys_initrd_size &&
+	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
+		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
+		       phys_initrd_start, phys_initrd_size);
+		phys_initrd_start = phys_initrd_size = 0;
+	}
 	if (phys_initrd_size) {
 		memblock_reserve(phys_initrd_start, phys_initrd_size);
 
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 8aa9744..c074e66 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -10,8 +10,6 @@
  */
 
 #include <linux/cpumask.h>
-#include <linux/err.h>
-#include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/oprofile.h>
@@ -46,6 +44,7 @@
 		return NULL;
 	}
 }
+#endif
 
 static int report_trace(struct stackframe *frame, void *d)
 {
@@ -85,7 +84,7 @@
 
 	/* frame pointers should strictly progress back up the stack
 	 * (towards higher addresses) */
-	if (tail >= buftail[0].fp)
+	if (tail + 1 >= buftail[0].fp)
 		return NULL;
 
 	return buftail[0].fp-1;
@@ -111,6 +110,7 @@
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
+	/* provide backtrace support also in timer mode: */
 	ops->backtrace		= arm_backtrace;
 
 	return oprofile_perf_init(ops);
@@ -120,11 +120,3 @@
 {
 	oprofile_perf_exit();
 }
-#else
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
-	pr_info("oprofile: hardware counters not available\n");
-	return -ENODEV;
-}
-void __exit oprofile_arch_exit(void) {}
-#endif /* CONFIG_HW_PERF_EVENTS */
diff --git a/arch/arm/plat-mxc/include/mach/uncompress.h b/arch/arm/plat-mxc/include/mach/uncompress.h
index 3a70ebf..ff469c4 100644
--- a/arch/arm/plat-mxc/include/mach/uncompress.h
+++ b/arch/arm/plat-mxc/include/mach/uncompress.h
@@ -95,6 +95,7 @@
 	case MACH_TYPE_MX35_3DS:
 	case MACH_TYPE_PCM043:
 	case MACH_TYPE_LILLY1131:
+	case MACH_TYPE_VPR200:
 		uart_base = MX3X_UART1_BASE_ADDR;
 		break;
 	case MACH_TYPE_MAGX_ZN5:
@@ -102,6 +103,7 @@
 		break;
 	case MACH_TYPE_MX51_BABBAGE:
 	case MACH_TYPE_EUKREA_CPUIMX51SD:
+	case MACH_TYPE_MX51_3DS:
 		uart_base = MX51_UART1_BASE_ADDR;
 		break;
 	case MACH_TYPE_MX50_RDP:
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index 18fe3cb..b6333ae 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -144,12 +144,9 @@
 config OMAP_IOMMU_IVA2
 	bool
 
-choice
-	prompt "System timer"
-	default OMAP_32K_TIMER if !ARCH_OMAP15XX
-
 config OMAP_MPU_TIMER
 	bool "Use mpu timer"
+	depends on ARCH_OMAP1
 	help
 	  Select this option if you want to use the OMAP mpu timer. This
 	  timer provides more intra-tick resolution than the 32KHz timer,
@@ -158,6 +155,7 @@
 config OMAP_32K_TIMER
 	bool "Use 32KHz timer"
 	depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS
+	default y if (ARCH_OMAP16XX || ARCH_OMAP2PLUS)
 	help
 	  Select this option if you want to enable the OMAP 32KHz timer.
 	  This timer saves power compared to the OMAP_MPU_TIMER, and has
@@ -165,8 +163,6 @@
 	  intra-tick resolution than OMAP_MPU_TIMER. The 32KHz timer is
 	  currently only available for OMAP16XX, 24XX, 34XX and OMAP4.
 
-endchoice
-
 config OMAP3_L2_AUX_SECURE_SAVE_RESTORE
 	bool "OMAP3 HS/EMU save and restore for L2 AUX control register"
 	depends on ARCH_OMAP3 && PM
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index ea46440..862dda9 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -36,8 +36,6 @@
 
 #define OMAP16XX_TIMER_32K_SYNCHRONIZED		0xfffbc410
 
-#if !(defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP15XX))
-
 #include <linux/clocksource.h>
 
 /*
@@ -122,12 +120,24 @@
 #define SC_MULT		4000000000u
 #define SC_SHIFT	17
 
-unsigned long long notrace sched_clock(void)
+static inline unsigned long long notrace _omap_32k_sched_clock(void)
 {
 	u32 cyc = clocksource_32k.read(&clocksource_32k);
 	return cyc_to_fixed_sched_clock(&cd, cyc, (u32)~0, SC_MULT, SC_SHIFT);
 }
 
+#ifndef CONFIG_OMAP_MPU_TIMER
+unsigned long long notrace sched_clock(void)
+{
+	return _omap_32k_sched_clock();
+}
+#else
+unsigned long long notrace omap_32k_sched_clock(void)
+{
+	return _omap_32k_sched_clock();
+}
+#endif
+
 static void notrace omap_update_sched_clock(void)
 {
 	u32 cyc = clocksource_32k.read(&clocksource_32k);
@@ -160,7 +170,7 @@
 	*ts = *tsp;
 }
 
-static int __init omap_init_clocksource_32k(void)
+int __init omap_init_clocksource_32k(void)
 {
 	static char err[] __initdata = KERN_ERR
 			"%s: can't register clocksource!\n";
@@ -195,7 +205,3 @@
 	}
 	return 0;
 }
-arch_initcall(omap_init_clocksource_32k);
-
-#endif	/* !(defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP15XX)) */
-
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index c4b2b47..8536308 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -53,7 +53,7 @@
 #endif
 
 #define OMAP_DMA_ACTIVE			0x01
-#define OMAP2_DMA_CSR_CLEAR_MASK	0xffe
+#define OMAP2_DMA_CSR_CLEAR_MASK	0xffffffff
 
 #define OMAP_FUNC_MUX_ARM_BASE		(0xfffe1000 + 0xec)
 
@@ -1873,7 +1873,7 @@
 		printk(KERN_INFO "DMA misaligned error with device %d\n",
 		       dma_chan[ch].dev_id);
 
-	p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, ch);
+	p->dma_write(status, CSR, ch);
 	p->dma_write(1 << ch, IRQSTATUS_L0, ch);
 	/* read back the register to flush the write */
 	p->dma_read(IRQSTATUS_L0, ch);
@@ -1893,10 +1893,9 @@
 			OMAP_DMA_CHAIN_INCQHEAD(chain_id);
 
 		status = p->dma_read(CSR, ch);
+		p->dma_write(status, CSR, ch);
 	}
 
-	p->dma_write(status, CSR, ch);
-
 	if (likely(dma_chan[ch].callback != NULL))
 		dma_chan[ch].callback(ch, status, dma_chan[ch].data);
 
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index 6b8088e..29b2afb 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -35,6 +35,9 @@
 
 extern void omap_map_common_io(void);
 extern struct sys_timer omap_timer;
+extern bool omap_32k_timer_init(void);
+extern int __init omap_init_clocksource_32k(void);
+extern unsigned long long notrace omap_32k_sched_clock(void);
 
 extern void omap_reserve(void);
 
diff --git a/arch/arm/plat-pxa/mfp.c b/arch/arm/plat-pxa/mfp.c
index b77e018..a9aa5ad 100644
--- a/arch/arm/plat-pxa/mfp.c
+++ b/arch/arm/plat-pxa/mfp.c
@@ -139,10 +139,11 @@
 #define mfp_configured(p)	((p)->config != -1)
 
 /*
- * perform a read-back of any MFPR register to make sure the
+ * perform a read-back of any valid MFPR register to make sure the
  * previous writings are finished
  */
-#define mfpr_sync()	(void)__raw_readl(mfpr_mmio_base + 0)
+static unsigned long mfpr_off_readback;
+#define mfpr_sync()	(void)__raw_readl(mfpr_mmio_base + mfpr_off_readback)
 
 static inline void __mfp_config_run(struct mfp_pin *p)
 {
@@ -248,6 +249,9 @@
 
 	spin_lock_irqsave(&mfp_spin_lock, flags);
 
+	/* mfp offset for readback */
+	mfpr_off_readback = map[0].offset;
+
 	for (p = map; p->start != MFP_PIN_INVALID; p++) {
 		offset = p->offset;
 		i = p->start;
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
index deb3995..557f8c5 100644
--- a/arch/arm/plat-s5p/Kconfig
+++ b/arch/arm/plat-s5p/Kconfig
@@ -37,6 +37,14 @@
 	help
 	  Common code for the GPIO interrupts (other than external interrupts.)
 
+comment "System MMU"
+
+config S5P_SYSTEM_MMU
+	bool "S5P SYSTEM MMU"
+	depends on ARCH_S5PV310
+	help
+	  Say Y here if you want to enable System MMU
+
 config S5P_DEV_FIMC0
 	bool
 	help
@@ -66,19 +74,3 @@
 	bool
 	help
 	  Compile in platform device definitions for MIPI-CSIS channel 1
-
-menuconfig S5P_SYSMMU
-	bool "SYSMMU support"
-	depends on ARCH_S5PV310
-	help
-	  This is a System MMU driver for Samsung ARM based Soc.
-
-if S5P_SYSMMU
-
-config S5P_SYSMMU_DEBUG
-	bool "Enables debug messages"
-	depends on S5P_SYSMMU
-	help
-	  This enables SYSMMU driver debug massages.
-
-endif
diff --git a/arch/arm/plat-s5p/Makefile b/arch/arm/plat-s5p/Makefile
index 92efe1a..4bd5cf9 100644
--- a/arch/arm/plat-s5p/Makefile
+++ b/arch/arm/plat-s5p/Makefile
@@ -19,6 +19,7 @@
 obj-y				+= irq.o
 obj-$(CONFIG_S5P_EXT_INT)	+= irq-eint.o
 obj-$(CONFIG_S5P_GPIO_INT)	+= irq-gpioint.o
+obj-$(CONFIG_S5P_SYSTEM_MMU)	+= sysmmu.o
 obj-$(CONFIG_PM)		+= pm.o
 obj-$(CONFIG_PM)		+= irq-pm.o
 
@@ -30,4 +31,3 @@
 obj-$(CONFIG_S5P_DEV_ONENAND)	+= dev-onenand.o
 obj-$(CONFIG_S5P_DEV_CSIS0)	+= dev-csis0.o
 obj-$(CONFIG_S5P_DEV_CSIS1)	+= dev-csis1.o
-obj-$(CONFIG_S5P_SYSMMU)	+= sysmmu.o
diff --git a/arch/arm/plat-s5p/include/plat/sysmmu.h b/arch/arm/plat-s5p/include/plat/sysmmu.h
deleted file mode 100644
index db298fc..0000000
--- a/arch/arm/plat-s5p/include/plat/sysmmu.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* linux/arch/arm/plat-s5p/include/plat/sysmmu.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *		http://www.samsung.com/
- *
- * Samsung sysmmu driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_PLAT_S5P_SYSMMU_H
-#define __ASM_PLAT_S5P_SYSMMU_H __FILE__
-
-/* debug macro */
-#ifdef CONFIG_S5P_SYSMMU_DEBUG
-#define sysmmu_debug(fmt, arg...)	printk(KERN_INFO "[%s] " fmt, __func__, ## arg)
-#else
-#define sysmmu_debug(fmt, arg...)	do { } while (0)
-#endif
-
-#endif /* __ASM_PLAT_S5P_SYSMMU_H */
diff --git a/arch/arm/plat-s5p/sysmmu.c b/arch/arm/plat-s5p/sysmmu.c
index d804914..ffe8a48 100644
--- a/arch/arm/plat-s5p/sysmmu.c
+++ b/arch/arm/plat-s5p/sysmmu.c
@@ -16,8 +16,6 @@
 #include <mach/regs-sysmmu.h>
 #include <mach/sysmmu.h>
 
-#include <plat/sysmmu.h>
-
 struct sysmmu_controller s5p_sysmmu_cntlrs[S5P_SYSMMU_TOTAL_IPNUM];
 
 void s5p_sysmmu_register(struct sysmmu_controller *sysmmuconp)
@@ -123,7 +121,7 @@
 		: "=r" (pg) : : "cc");		\
 		pg &= ~0x3fff;
 
-	sysmmu_debug("CP15 TTBR0 : 0x%x\n", pg);
+	printk(KERN_INFO "%s: CP15 TTBR0 : 0x%x\n", __func__, pg);
 
 	/* Set sysmmu page table base address */
 	__raw_writel(pg, sysmmuconp->regs + S5P_PT_BASE_ADDR);
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index d9025e3..30518cc 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -17,6 +17,8 @@
 
 #include <linux/irq.h>
 
+struct sys_device;
+
 #ifdef CONFIG_PM
 
 extern __init int s3c_pm_init(void);
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index 2fea897..9d6feaa 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -12,7 +12,7 @@
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Sun Dec 12 23:24:27 2010
+# Last update: Mon Feb 7 08:59:27 2011
 #
 # machine_is_xxx	CONFIG_xxxx		MACH_TYPE_xxx		number
 #
@@ -2240,7 +2240,7 @@
 vs_v210			MACH_VS_V210		VS_V210			2252
 vs_v212			MACH_VS_V212		VS_V212			2253
 hmt			MACH_HMT		HMT			2254
-suen3			MACH_SUEN3		SUEN3			2255
+km_kirkwood		MACH_KM_KIRKWOOD	KM_KIRKWOOD		2255
 vesper			MACH_VESPER		VESPER			2256
 str9			MACH_STR9		STR9			2257
 omap3_wl_ff		MACH_OMAP3_WL_FF	OMAP3_WL_FF		2258
@@ -2987,7 +2987,7 @@
 ea20			MACH_EA20		EA20			3002
 awm2			MACH_AWM2		AWM2			3003
 ti8148evm		MACH_TI8148EVM		TI8148EVM		3004
-tegra_seaboard		MACH_TEGRA_SEABOARD	TEGRA_SEABOARD		3005
+seaboard		MACH_SEABOARD		SEABOARD		3005
 linkstation_chlv2	MACH_LINKSTATION_CHLV2	LINKSTATION_CHLV2	3006
 tera_pro2_rack		MACH_TERA_PRO2_RACK	TERA_PRO2_RACK		3007
 rubys			MACH_RUBYS		RUBYS			3008
@@ -3190,7 +3190,7 @@
 ics_if_voip		MACH_ICS_IF_VOIP	ICS_IF_VOIP		3206
 wlf_cragg_6410		MACH_WLF_CRAGG_6410	WLF_CRAGG_6410		3207
 punica			MACH_PUNICA		PUNICA			3208
-sbc_nt250		MACH_SBC_NT250		SBC_NT250		3209
+trimslice		MACH_TRIMSLICE		TRIMSLICE		3209
 mx27_wmultra		MACH_MX27_WMULTRA	MX27_WMULTRA		3210
 mackerel		MACH_MACKEREL		MACKEREL		3211
 fa9x27			MACH_FA9X27		FA9X27			3213
@@ -3219,3 +3219,100 @@
 pcm048			MACH_PCM048		PCM048			3236
 dds			MACH_DDS		DDS			3237
 chalten_xa1		MACH_CHALTEN_XA1	CHALTEN_XA1		3238
+ts48xx			MACH_TS48XX		TS48XX			3239
+tonga2_tfttimer		MACH_TONGA2_TFTTIMER	TONGA2_TFTTIMER		3240
+whistler		MACH_WHISTLER		WHISTLER		3241
+asl_phoenix		MACH_ASL_PHOENIX	ASL_PHOENIX		3242
+at91sam9263otlite	MACH_AT91SAM9263OTLITE	AT91SAM9263OTLITE	3243
+ddplug			MACH_DDPLUG		DDPLUG			3244
+d2plug			MACH_D2PLUG		D2PLUG			3245
+kzm9d			MACH_KZM9D		KZM9D			3246
+verdi_lte		MACH_VERDI_LTE		VERDI_LTE		3247
+nanozoom		MACH_NANOZOOM		NANOZOOM		3248
+dm3730_som_lv		MACH_DM3730_SOM_LV	DM3730_SOM_LV		3249
+dm3730_torpedo		MACH_DM3730_TORPEDO	DM3730_TORPEDO		3250
+anchovy			MACH_ANCHOVY		ANCHOVY			3251
+re2rev20		MACH_RE2REV20		RE2REV20		3253
+re2rev21		MACH_RE2REV21		RE2REV21		3254
+cns21xx			MACH_CNS21XX		CNS21XX			3255
+rider			MACH_RIDER		RIDER			3257
+nsk330			MACH_NSK330		NSK330			3258
+cns2133evb		MACH_CNS2133EVB		CNS2133EVB		3259
+z3_816x_mod		MACH_Z3_816X_MOD	Z3_816X_MOD		3260
+z3_814x_mod		MACH_Z3_814X_MOD	Z3_814X_MOD		3261
+beect			MACH_BEECT		BEECT			3262
+dma_thunderbug		MACH_DMA_THUNDERBUG	DMA_THUNDERBUG		3263
+omn_at91sam9g20		MACH_OMN_AT91SAM9G20	OMN_AT91SAM9G20		3264
+mx25_e2s_uc		MACH_MX25_E2S_UC	MX25_E2S_UC		3265
+mione			MACH_MIONE		MIONE			3266
+top9000_tcu		MACH_TOP9000_TCU	TOP9000_TCU		3267
+top9000_bsl		MACH_TOP9000_BSL	TOP9000_BSL		3268
+kingdom			MACH_KINGDOM		KINGDOM			3269
+armadillo460		MACH_ARMADILLO460	ARMADILLO460		3270
+lq2			MACH_LQ2		LQ2			3271
+sweda_tms2		MACH_SWEDA_TMS2		SWEDA_TMS2		3272
+mx53_loco		MACH_MX53_LOCO		MX53_LOCO		3273
+acer_a8			MACH_ACER_A8		ACER_A8			3275
+acer_gauguin		MACH_ACER_GAUGUIN	ACER_GAUGUIN		3276
+guppy			MACH_GUPPY		GUPPY			3277
+mx61_ard		MACH_MX61_ARD		MX61_ARD		3278
+tx53			MACH_TX53		TX53			3279
+omapl138_case_a3	MACH_OMAPL138_CASE_A3	OMAPL138_CASE_A3	3280
+uemd			MACH_UEMD		UEMD			3281
+ccwmx51mut		MACH_CCWMX51MUT		CCWMX51MUT		3282
+rockhopper		MACH_ROCKHOPPER		ROCKHOPPER		3283
+nookcolor		MACH_NOOKCOLOR		NOOKCOLOR		3284
+hkdkc100		MACH_HKDKC100		HKDKC100		3285
+ts42xx			MACH_TS42XX		TS42XX			3286
+aebl			MACH_AEBL		AEBL			3287
+wario			MACH_WARIO		WARIO			3288
+gfs_spm			MACH_GFS_SPM		GFS_SPM			3289
+cm_t3730		MACH_CM_T3730		CM_T3730		3290
+isc3			MACH_ISC3		ISC3			3291
+rascal			MACH_RASCAL		RASCAL			3292
+hrefv60			MACH_HREFV60		HREFV60			3293
+tpt_2_0			MACH_TPT_2_0		TPT_2_0			3294
+pyramid_td		MACH_PYRAMID_TD		PYRAMID_TD		3295
+splendor		MACH_SPLENDOR		SPLENDOR		3296
+guf_planet		MACH_GUF_PLANET		GUF_PLANET		3297
+msm8x60_qt		MACH_MSM8X60_QT		MSM8X60_QT		3298
+htc_hd_mini		MACH_HTC_HD_MINI	HTC_HD_MINI		3299
+athene			MACH_ATHENE		ATHENE			3300
+deep_r_ek_1		MACH_DEEP_R_EK_1	DEEP_R_EK_1		3301
+vivow_ct		MACH_VIVOW_CT		VIVOW_CT		3302
+nery_1000		MACH_NERY_1000		NERY_1000		3303
+rfl109145_ssrv		MACH_RFL109145_SSRV	RFL109145_SSRV		3304
+nmh			MACH_NMH		NMH			3305
+wn802t			MACH_WN802T		WN802T			3306
+dragonet		MACH_DRAGONET		DRAGONET		3307
+geneva_b		MACH_GENEVA_B		GENEVA_B		3308
+at91sam9263desk16l	MACH_AT91SAM9263DESK16L	AT91SAM9263DESK16L	3309
+bcmhana_sv		MACH_BCMHANA_SV		BCMHANA_SV		3310
+bcmhana_tablet		MACH_BCMHANA_TABLET	BCMHANA_TABLET		3311
+koi			MACH_KOI		KOI			3312
+ts4800			MACH_TS4800		TS4800			3313
+tqma9263		MACH_TQMA9263		TQMA9263		3314
+holiday			MACH_HOLIDAY		HOLIDAY			3315
+dma_6410		MACH_DMA6410		DMA6410			3316
+pcats_overlay		MACH_PCATS_OVERLAY	PCATS_OVERLAY		3317
+hwgw6410		MACH_HWGW6410		HWGW6410		3318
+shenzhou		MACH_SHENZHOU		SHENZHOU		3319
+cwme9210		MACH_CWME9210		CWME9210		3320
+cwme9210js		MACH_CWME9210JS		CWME9210JS		3321
+pgs_v1			MACH_PGS_SITARA		PGS_SITARA		3322
+colibri_tegra2		MACH_COLIBRI_TEGRA2	COLIBRI_TEGRA2		3323
+w21			MACH_W21		W21			3324
+polysat1		MACH_POLYSAT1		POLYSAT1		3325
+dataway			MACH_DATAWAY		DATAWAY			3326
+cobral138		MACH_COBRAL138		COBRAL138		3327
+roverpcs8		MACH_ROVERPCS8		ROVERPCS8		3328
+marvelc			MACH_MARVELC		MARVELC			3329
+navefihid		MACH_NAVEFIHID		NAVEFIHID		3330
+dm365_cv100		MACH_DM365_CV100	DM365_CV100		3331
+able			MACH_ABLE		ABLE			3332
+legacy			MACH_LEGACY		LEGACY			3333
+icong			MACH_ICONG		ICONG			3334
+rover_g8		MACH_ROVER_G8		ROVER_G8		3335
+t5388p			MACH_T5388P		T5388P			3336
+dingo			MACH_DINGO		DINGO			3337
+goflexhome		MACH_GOFLEXHOME		GOFLEXHOME		3338
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index 313b130..cd2062f 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -1,8 +1,8 @@
 config AVR32
 	def_bool y
-	# With EMBEDDED=n, we get lots of stuff automatically selected
+	# With EXPERT=n, we get lots of stuff automatically selected
 	# that we usually don't need on AVR32.
-	select EMBEDDED
+	select EXPERT
 	select HAVE_CLK
 	select HAVE_OPROFILE
 	select HAVE_KPROBES
diff --git a/arch/avr32/include/asm/pgalloc.h b/arch/avr32/include/asm/pgalloc.h
index 92ecd84..bc7e8ae 100644
--- a/arch/avr32/include/asm/pgalloc.h
+++ b/arch/avr32/include/asm/pgalloc.h
@@ -8,6 +8,7 @@
 #ifndef __ASM_AVR32_PGALLOC_H
 #define __ASM_AVR32_PGALLOC_H
 
+#include <linux/mm.h>
 #include <linux/quicklist.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 0a221d4..c09577d 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -30,6 +30,9 @@
 	select HAVE_KERNEL_LZO if RAMKERNEL
 	select HAVE_OPROFILE
 	select ARCH_WANT_OPTIONAL_GPIOLIB
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_IRQ_PROBE
+	select IRQ_PER_CPU if SMP
 
 config GENERIC_CSUM
 	def_bool y
@@ -44,15 +47,6 @@
 config GENERIC_FIND_NEXT_BIT
 	def_bool y
 
-config GENERIC_HARDIRQS
-	def_bool y
-
-config GENERIC_IRQ_PROBE
-	def_bool y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 config GENERIC_GPIO
 	def_bool y
 
@@ -254,11 +248,6 @@
 	depends on SMP && HOTPLUG
 	default y
 
-config IRQ_PER_CPU
-	bool
-	depends on SMP
-	default y
-
 config HAVE_LEGACY_PER_CPU_AREA
 	def_bool y
 	depends on SMP
diff --git a/arch/blackfin/configs/BF518F-EZBRD_defconfig b/arch/blackfin/configs/BF518F-EZBRD_defconfig
index c0b988e..db8d38a 100644
--- a/arch/blackfin/configs/BF518F-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF518F-EZBRD_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig
index 864af5b..3e50d78 100644
--- a/arch/blackfin/configs/BF526-EZBRD_defconfig
+++ b/arch/blackfin/configs/BF526-EZBRD_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig b/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
index 7b6a337..362f59d 100644
--- a/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
+++ b/arch/blackfin/configs/BF527-AD7160-EVAL_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_ELF_CORE is not set
 # CONFIG_AIO is not set
 CONFIG_SLAB=y
diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
index 4faa6b4..023ff0d 100644
--- a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig
index 9d893eb..4e5a121 100644
--- a/arch/blackfin/configs/BF527-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF527-EZKIT_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF527-TLL6527M_defconfig b/arch/blackfin/configs/BF527-TLL6527M_defconfig
index 97a2767..cd0636b 100644
--- a/arch/blackfin/configs/BF527-TLL6527M_defconfig
+++ b/arch/blackfin/configs/BF527-TLL6527M_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF533-EZKIT_defconfig b/arch/blackfin/configs/BF533-EZKIT_defconfig
index f847743..9f8fc84 100644
--- a/arch/blackfin/configs/BF533-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF533-EZKIT_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF533-STAMP_defconfig b/arch/blackfin/configs/BF533-STAMP_defconfig
index 0e7262c..ccc432b 100644
--- a/arch/blackfin/configs/BF533-STAMP_defconfig
+++ b/arch/blackfin/configs/BF533-STAMP_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF537-STAMP_defconfig b/arch/blackfin/configs/BF537-STAMP_defconfig
index 4d14a00..5666954 100644
--- a/arch/blackfin/configs/BF537-STAMP_defconfig
+++ b/arch/blackfin/configs/BF537-STAMP_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF538-EZKIT_defconfig b/arch/blackfin/configs/BF538-EZKIT_defconfig
index fbee9d7..ac22124 100644
--- a/arch/blackfin/configs/BF538-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF538-EZKIT_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig
index 05dd11d..944404b 100644
--- a/arch/blackfin/configs/BF548-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF548-EZKIT_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF561-ACVILON_defconfig b/arch/blackfin/configs/BF561-ACVILON_defconfig
index bcb14d1..b7c8451 100644
--- a/arch/blackfin/configs/BF561-ACVILON_defconfig
+++ b/arch/blackfin/configs/BF561-ACVILON_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
index 4cf4510..7e67ba3 100644
--- a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BF561-EZKIT_defconfig b/arch/blackfin/configs/BF561-EZKIT_defconfig
index 843aaa5..141e593 100644
--- a/arch/blackfin/configs/BF561-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF561-EZKIT_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig
index dae7adf..97ebe09 100644
--- a/arch/blackfin/configs/BlackStamp_defconfig
+++ b/arch/blackfin/configs/BlackStamp_defconfig
@@ -6,7 +6,7 @@
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/CM-BF527_defconfig b/arch/blackfin/configs/CM-BF527_defconfig
index f341424..c245754 100644
--- a/arch/blackfin/configs/CM-BF527_defconfig
+++ b/arch/blackfin/configs/CM-BF527_defconfig
@@ -8,7 +8,7 @@
 # CONFIG_RD_GZIP is not set
 CONFIG_RD_LZMA=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/CM-BF533_defconfig b/arch/blackfin/configs/CM-BF533_defconfig
index 8c7e08f..baf1c15 100644
--- a/arch/blackfin/configs/CM-BF533_defconfig
+++ b/arch/blackfin/configs/CM-BF533_defconfig
@@ -7,7 +7,7 @@
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_RD_GZIP is not set
 CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/CM-BF537E_defconfig b/arch/blackfin/configs/CM-BF537E_defconfig
index bd3cb76..707cbf8 100644
--- a/arch/blackfin/configs/CM-BF537E_defconfig
+++ b/arch/blackfin/configs/CM-BF537E_defconfig
@@ -8,7 +8,7 @@
 # CONFIG_RD_GZIP is not set
 CONFIG_RD_LZMA=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/CM-BF537U_defconfig b/arch/blackfin/configs/CM-BF537U_defconfig
index 82224f3..4596935 100644
--- a/arch/blackfin/configs/CM-BF537U_defconfig
+++ b/arch/blackfin/configs/CM-BF537U_defconfig
@@ -8,7 +8,7 @@
 # CONFIG_RD_GZIP is not set
 CONFIG_RD_LZMA=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index 433598c..df26758 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -8,7 +8,7 @@
 # CONFIG_RD_GZIP is not set
 CONFIG_RD_LZMA=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/CM-BF561_defconfig b/arch/blackfin/configs/CM-BF561_defconfig
index ded7d84..6c7b215 100644
--- a/arch/blackfin/configs/CM-BF561_defconfig
+++ b/arch/blackfin/configs/CM-BF561_defconfig
@@ -8,7 +8,7 @@
 # CONFIG_RD_GZIP is not set
 CONFIG_RD_LZMA=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/DNP5370_defconfig b/arch/blackfin/configs/DNP5370_defconfig
index 0ebc7d9..f503136 100644
--- a/arch/blackfin/configs/DNP5370_defconfig
+++ b/arch/blackfin/configs/DNP5370_defconfig
@@ -5,7 +5,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLOB=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/blackfin/configs/H8606_defconfig b/arch/blackfin/configs/H8606_defconfig
index 700fb70..7450127 100644
--- a/arch/blackfin/configs/H8606_defconfig
+++ b/arch/blackfin/configs/H8606_defconfig
@@ -2,7 +2,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/IP0X_defconfig b/arch/blackfin/configs/IP0X_defconfig
index b40156d..5e797cf 100644
--- a/arch/blackfin/configs/IP0X_defconfig
+++ b/arch/blackfin/configs/IP0X_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/PNAV-10_defconfig b/arch/blackfin/configs/PNAV-10_defconfig
index be866d9..a566a2f 100644
--- a/arch/blackfin/configs/PNAV-10_defconfig
+++ b/arch/blackfin/configs/PNAV-10_defconfig
@@ -2,7 +2,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/SRV1_defconfig b/arch/blackfin/configs/SRV1_defconfig
index b64bdf7..8538095 100644
--- a/arch/blackfin/configs/SRV1_defconfig
+++ b/arch/blackfin/configs/SRV1_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/configs/TCM-BF518_defconfig b/arch/blackfin/configs/TCM-BF518_defconfig
index 1bccd9a..d496ae9 100644
--- a/arch/blackfin/configs/TCM-BF518_defconfig
+++ b/arch/blackfin/configs/TCM-BF518_defconfig
@@ -7,7 +7,7 @@
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_RD_GZIP is not set
 CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/blackfin/configs/TCM-BF537_defconfig b/arch/blackfin/configs/TCM-BF537_defconfig
index 00ce899..65f6421 100644
--- a/arch/blackfin/configs/TCM-BF537_defconfig
+++ b/arch/blackfin/configs/TCM-BF537_defconfig
@@ -8,7 +8,7 @@
 # CONFIG_RD_GZIP is not set
 CONFIG_RD_LZMA=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h
index 1ff9f14..7dbc664 100644
--- a/arch/blackfin/include/asm/bfin_serial.h
+++ b/arch/blackfin/include/asm/bfin_serial.h
@@ -10,6 +10,7 @@
 #define __BFIN_ASM_SERIAL_H__
 
 #include <linux/serial_core.h>
+#include <linux/spinlock.h>
 #include <mach/anomaly.h>
 #include <mach/bfin_serial.h>
 
@@ -41,6 +42,7 @@
 	struct circ_buf rx_dma_buf;
 	struct timer_list rx_dma_timer;
 	int rx_dma_nrows;
+	spinlock_t rx_lock;
 	unsigned int tx_dma_channel;
 	unsigned int rx_dma_channel;
 	struct work_struct tx_dma_workqueue;
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 613e628..0a7a4c1 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -54,6 +54,8 @@
 	bool
 	default y
 	select HAVE_IDE
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_HARDIRQS_NO_DEPRECATED
 
 config HZ
 	int
@@ -67,10 +69,6 @@
 
 source "fs/Kconfig.binfmt"
 
-config GENERIC_HARDIRQS
-	bool
-	default y
-
 config ETRAX_CMDLINE
 	string "Kernel command line"
 	default "root=/dev/mtdblock3"
diff --git a/arch/cris/arch-v10/kernel/irq.c b/arch/cris/arch-v10/kernel/irq.c
index a0c0df8..7328a7c 100644
--- a/arch/cris/arch-v10/kernel/irq.c
+++ b/arch/cris/arch-v10/kernel/irq.c
@@ -104,43 +104,21 @@
 	IRQ31_interrupt
 };
 
-static void enable_crisv10_irq(unsigned int irq);
-
-static unsigned int startup_crisv10_irq(unsigned int irq)
+static void enable_crisv10_irq(struct irq_data *data)
 {
-	enable_crisv10_irq(irq);
-	return 0;
+	crisv10_unmask_irq(data->irq);
 }
 
-#define shutdown_crisv10_irq	disable_crisv10_irq
-
-static void enable_crisv10_irq(unsigned int irq)
+static void disable_crisv10_irq(struct irq_data *data)
 {
-	crisv10_unmask_irq(irq);
-}
-
-static void disable_crisv10_irq(unsigned int irq)
-{
-	crisv10_mask_irq(irq);
-}
-
-static void ack_crisv10_irq(unsigned int irq)
-{
-}
-
-static void end_crisv10_irq(unsigned int irq)
-{
+	crisv10_mask_irq(data->irq);
 }
 
 static struct irq_chip crisv10_irq_type = {
-	.name =        "CRISv10",
-	.startup =     startup_crisv10_irq,
-	.shutdown =    shutdown_crisv10_irq,
-	.enable =      enable_crisv10_irq,
-	.disable =     disable_crisv10_irq,
-	.ack =         ack_crisv10_irq,
-	.end =         end_crisv10_irq,
-	.set_affinity = NULL
+	.name		= "CRISv10",
+	.irq_shutdown	= disable_crisv10_irq,
+	.irq_enable	= enable_crisv10_irq,
+	.irq_disable	= disable_crisv10_irq,
 };
 
 void weird_irq(void);
@@ -221,7 +199,8 @@
 
 	/* Initialize IRQ handler descriptors. */
 	for(i = 2; i < NR_IRQS; i++) {
-		irq_desc[i].chip = &crisv10_irq_type;
+		set_irq_chip_and_handler(i, &crisv10_irq_type,
+					 handle_simple_irq);
 		set_int_vector(i, interrupt[i]);
 	}
 
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index 2ed48ae3d..0ad9db5 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -291,54 +291,33 @@
 }
 
 
-static unsigned int startup_crisv32_irq(unsigned int irq)
+static void enable_crisv32_irq(struct irq_data *data)
 {
-	crisv32_unmask_irq(irq);
-	return 0;
+	crisv32_unmask_irq(data->irq);
 }
 
-static void shutdown_crisv32_irq(unsigned int irq)
+static void disable_crisv32_irq(struct irq_data *data)
 {
-	crisv32_mask_irq(irq);
+	crisv32_mask_irq(data->irq);
 }
 
-static void enable_crisv32_irq(unsigned int irq)
-{
-	crisv32_unmask_irq(irq);
-}
-
-static void disable_crisv32_irq(unsigned int irq)
-{
-	crisv32_mask_irq(irq);
-}
-
-static void ack_crisv32_irq(unsigned int irq)
-{
-}
-
-static void end_crisv32_irq(unsigned int irq)
-{
-}
-
-int set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest)
+static int set_affinity_crisv32_irq(struct irq_data *data,
+				    const struct cpumask *dest, bool force)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&irq_lock, flags);
-	irq_allocations[irq - FIRST_IRQ].mask = *dest;
-	spin_unlock_irqrestore(&irq_lock, flags);
 
+	spin_lock_irqsave(&irq_lock, flags);
+	irq_allocations[data->irq - FIRST_IRQ].mask = *dest;
+	spin_unlock_irqrestore(&irq_lock, flags);
 	return 0;
 }
 
 static struct irq_chip crisv32_irq_type = {
-	.name =        "CRISv32",
-	.startup =     startup_crisv32_irq,
-	.shutdown =    shutdown_crisv32_irq,
-	.enable =      enable_crisv32_irq,
-	.disable =     disable_crisv32_irq,
-	.ack =         ack_crisv32_irq,
-	.end =         end_crisv32_irq,
-	.set_affinity = set_affinity_crisv32_irq
+	.name			= "CRISv32",
+	.irq_shutdown		= disable_crisv32_irq,
+	.irq_enable		= enable_crisv32_irq,
+	.irq_disable		= disable_crisv32_irq,
+	.irq_set_affinity	= set_affinity_crisv32_irq,
 };
 
 void
@@ -472,7 +451,8 @@
 
 	/* Point all IRQ's to bad handlers. */
 	for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) {
-		irq_desc[j].chip = &crisv32_irq_type;
+		set_irq_chip_and_handler(j, &crisv32_irq_type,
+					 handle_simple_irq);
 		set_exception_vector(i, interrupt[j]);
 	}
 
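The CRIS conversions above follow the same generic pattern used later in this patch for h8300 and m32r: chip callbacks receive a struct irq_data * and descriptors are registered with set_irq_chip_and_handler() instead of being written into irq_desc[] directly. A stand-alone sketch of that pattern, with hypothetical names and no real hardware access:

#include <linux/init.h>
#include <linux/irq.h>

static void example_mask_irq(struct irq_data *data)
{
	/* hardware-specific masking of data->irq would go here */
}

static void example_unmask_irq(struct irq_data *data)
{
	/* hardware-specific unmasking of data->irq would go here */
}

static struct irq_chip example_irq_chip = {
	.name		= "EXAMPLE",
	.irq_mask	= example_mask_irq,
	.irq_unmask	= example_unmask_irq,
};

static void __init example_init_irq(void)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++)
		set_irq_chip_and_handler(irq, &example_irq_chip,
					 handle_level_irq);
}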
diff --git a/arch/cris/configs/artpec_3_defconfig b/arch/cris/configs/artpec_3_defconfig
index 590f72c..71854d4 100644
--- a/arch/cris/configs/artpec_3_defconfig
+++ b/arch/cris/configs/artpec_3_defconfig
@@ -2,7 +2,7 @@
 # CONFIG_SWAP is not set
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/cris/configs/etrax-100lx_v2_defconfig b/arch/cris/configs/etrax-100lx_v2_defconfig
index 1b2853e..a85aabf 100644
--- a/arch/cris/configs/etrax-100lx_v2_defconfig
+++ b/arch/cris/configs/etrax-100lx_v2_defconfig
@@ -2,7 +2,7 @@
 # CONFIG_SWAP is not set
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/cris/configs/etraxfs_defconfig b/arch/cris/configs/etraxfs_defconfig
index f73d38c..87c7227 100644
--- a/arch/cris/configs/etraxfs_defconfig
+++ b/arch/cris/configs/etraxfs_defconfig
@@ -2,7 +2,7 @@
 # CONFIG_SWAP is not set
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index 469f7f9..c346952 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -62,7 +62,7 @@
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->name);
+		seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name);
 		seq_printf(p, "  %s", action->name);
 
 		for (action=action->next; action; action = action->next)
@@ -93,8 +93,8 @@
 		printk("do_IRQ: stack overflow: %lX\n", sp);
 		show_stack(NULL, (unsigned long *)sp);
 	}
-	__do_IRQ(irq);
-        irq_exit();
+	generic_handle_irq(irq);
+	irq_exit();
 	set_irq_regs(old_regs);
 }
 
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index f6bcb03..747499a 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -5,6 +5,7 @@
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
+	select HAVE_GENERIC_HARDIRQS
 
 config ZONE_DMA
 	bool
@@ -29,14 +30,6 @@
 	bool
 	default n
 
-config GENERIC_HARDIRQS
-	bool
-	default y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	bool
-	default y
-
 config TIME_LOW_RES
 	bool
 	default y
diff --git a/arch/frv/defconfig b/arch/frv/defconfig
index b8ebe9e..b1b7926 100644
--- a/arch/frv/defconfig
+++ b/arch/frv/defconfig
@@ -3,7 +3,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 CONFIG_MMU=y
 CONFIG_FRV_OUTOFLINE_ATOMIC_OPS=y
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 65f897d8..6df692d 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -2,6 +2,8 @@
 	bool
 	default y
 	select HAVE_IDE
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_HARDIRQS_NO_DEPRECATED
 
 config SYMBOL_PREFIX
 	string
@@ -47,10 +49,6 @@
 	bool
 	default y
 
-config GENERIC_HARDIRQS
-	bool
-	default y
-
 config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
diff --git a/arch/h8300/defconfig b/arch/h8300/defconfig
index 342f777..042425a 100644
--- a/arch/h8300/defconfig
+++ b/arch/h8300/defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c
index c25dc2c..7643d39 100644
--- a/arch/h8300/kernel/irq.c
+++ b/arch/h8300/kernel/irq.c
@@ -38,34 +38,30 @@
 	return (irq >= EXT_IRQ0 && irq <= (EXT_IRQ0 + EXT_IRQS));
 }
 
-static void h8300_enable_irq(unsigned int irq)
+static void h8300_enable_irq(struct irq_data *data)
 {
-	if (is_ext_irq(irq))
-		IER_REGS |= 1 << (irq - EXT_IRQ0);
+	if (is_ext_irq(data->irq))
+		IER_REGS |= 1 << (data->irq - EXT_IRQ0);
 }
 
-static void h8300_disable_irq(unsigned int irq)
+static void h8300_disable_irq(struct irq_data *data)
 {
-	if (is_ext_irq(irq))
-		IER_REGS &= ~(1 << (irq - EXT_IRQ0));
+	if (is_ext_irq(data->irq))
+		IER_REGS &= ~(1 << (data->irq - EXT_IRQ0));
 }
 
-static void h8300_end_irq(unsigned int irq)
+static unsigned int h8300_startup_irq(struct irq_data *data)
 {
-}
-
-static unsigned int h8300_startup_irq(unsigned int irq)
-{
-	if (is_ext_irq(irq))
-		return h8300_enable_irq_pin(irq);
+	if (is_ext_irq(data->irq))
+		return h8300_enable_irq_pin(data->irq);
 	else
 		return 0;
 }
 
-static void h8300_shutdown_irq(unsigned int irq)
+static void h8300_shutdown_irq(struct irq_data *data)
 {
-	if (is_ext_irq(irq))
-		h8300_disable_irq_pin(irq);
+	if (is_ext_irq(data->irq))
+		h8300_disable_irq_pin(data->irq);
 }
 
 /*
@@ -73,12 +69,10 @@
  */
 struct irq_chip h8300irq_chip = {
 	.name		= "H8300-INTC",
-	.startup	= h8300_startup_irq,
-	.shutdown	= h8300_shutdown_irq,
-	.enable		= h8300_enable_irq,
-	.disable	= h8300_disable_irq,
-	.ack		= NULL,
-	.end		= h8300_end_irq,
+	.irq_startup	= h8300_startup_irq,
+	.irq_shutdown	= h8300_shutdown_irq,
+	.irq_enable	= h8300_enable_irq,
+	.irq_disable	= h8300_disable_irq,
 };
 
 #if defined(CONFIG_RAMKERNEL)
@@ -160,18 +154,14 @@
 
 	setup_vector();
 
-	for (c = 0; c < NR_IRQS; c++) {
-		irq_desc[c].status = IRQ_DISABLED;
-		irq_desc[c].action = NULL;
-		irq_desc[c].depth = 1;
-		irq_desc[c].chip = &h8300irq_chip;
-	}
+	for (c = 0; c < NR_IRQS; c++)
+		set_irq_chip_and_handler(c, &h8300irq_chip, handle_simple_irq);
 }
 
 asmlinkage void do_IRQ(int irq)
 {
 	irq_enter();
-	__do_IRQ(irq);
+	generic_handle_irq(irq);
 	irq_exit();
 }
 
@@ -192,7 +182,7 @@
 			goto unlock;
 		seq_printf(p, "%3d: ",i);
 		seq_printf(p, "%10u ", kstat_irqs(i));
-		seq_printf(p, " %14s", irq_desc[i].chip->name);
+		seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name);
 		seq_printf(p, "-%-8s", irq_desc[i].name);
 		seq_printf(p, "  %s", action->name);
 
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index e0f5b6d..fcf3b43 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -22,6 +22,10 @@
 	select HAVE_KVM
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_DMA_API_DEBUG
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_IRQ_PROBE
+	select GENERIC_PENDING_IRQ if SMP
+	select IRQ_PER_CPU
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
@@ -678,28 +682,6 @@
 
 source "lib/Kconfig"
 
-#
-# Use the generic interrupt handling code in kernel/irq/:
-#
-config GENERIC_HARDIRQS
-	def_bool y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
-config GENERIC_IRQ_PROBE
-	bool
-	default y
-
-config GENERIC_PENDING_IRQ
-	bool
-	depends on GENERIC_HARDIRQS && SMP
-	default y
-
-config IRQ_PER_CPU
-	bool
-	default y
-
 config IOMMU_HELPER
 	def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
 
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 5c291d6..ef4c1e4 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -7,6 +7,9 @@
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_HARDIRQS_NO_DEPRECATED
+	select GENERIC_IRQ_PROBE
 
 config SBUS
 	bool
@@ -19,14 +22,6 @@
 	bool
 	default y
 
-config GENERIC_HARDIRQS
-	bool
-	default y
-
-config GENERIC_IRQ_PROBE
-	bool
-	default y
-
 config NO_IOPORT
 	def_bool y
 
diff --git a/arch/m32r/configs/m32700ut.smp_defconfig b/arch/m32r/configs/m32700ut.smp_defconfig
index 816c3ec..a3d727e 100644
--- a/arch/m32r/configs/m32700ut.smp_defconfig
+++ b/arch/m32r/configs/m32700ut.smp_defconfig
@@ -5,7 +5,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=15
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/m32700ut.up_defconfig b/arch/m32r/configs/m32700ut.up_defconfig
index 8478568..b833416 100644
--- a/arch/m32r/configs/m32700ut.up_defconfig
+++ b/arch/m32r/configs/m32700ut.up_defconfig
@@ -5,7 +5,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi.nommu_defconfig b/arch/m32r/configs/mappi.nommu_defconfig
index 354a964..7c90ce2 100644
--- a/arch/m32r/configs/mappi.nommu_defconfig
+++ b/arch/m32r/configs/mappi.nommu_defconfig
@@ -3,7 +3,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi.smp_defconfig b/arch/m32r/configs/mappi.smp_defconfig
index 9022307..367d07c 100644
--- a/arch/m32r/configs/mappi.smp_defconfig
+++ b/arch/m32r/configs/mappi.smp_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=15
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi.up_defconfig b/arch/m32r/configs/mappi.up_defconfig
index 3726068..cb11384 100644
--- a/arch/m32r/configs/mappi.up_defconfig
+++ b/arch/m32r/configs/mappi.up_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi2.opsp_defconfig b/arch/m32r/configs/mappi2.opsp_defconfig
index 6136fad..3bff779 100644
--- a/arch/m32r/configs/mappi2.opsp_defconfig
+++ b/arch/m32r/configs/mappi2.opsp_defconfig
@@ -4,7 +4,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi2.vdec2_defconfig b/arch/m32r/configs/mappi2.vdec2_defconfig
index dce1fc7..75246c9 100644
--- a/arch/m32r/configs/mappi2.vdec2_defconfig
+++ b/arch/m32r/configs/mappi2.vdec2_defconfig
@@ -4,7 +4,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/mappi3.smp_defconfig b/arch/m32r/configs/mappi3.smp_defconfig
index b204e2e..27cefd4 100644
--- a/arch/m32r/configs/mappi3.smp_defconfig
+++ b/arch/m32r/configs/mappi3.smp_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=15
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/oaks32r_defconfig b/arch/m32r/configs/oaks32r_defconfig
index 5aa4ea9..5087a51 100644
--- a/arch/m32r/configs/oaks32r_defconfig
+++ b/arch/m32r/configs/oaks32r_defconfig
@@ -2,7 +2,7 @@
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/opsput_defconfig b/arch/m32r/configs/opsput_defconfig
index 8494c6a..50c6f52 100644
--- a/arch/m32r/configs/opsput_defconfig
+++ b/arch/m32r/configs/opsput_defconfig
@@ -4,7 +4,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/m32r/configs/usrv_defconfig b/arch/m32r/configs/usrv_defconfig
index 1df293b..a3cfaae 100644
--- a/arch/m32r/configs/usrv_defconfig
+++ b/arch/m32r/configs/usrv_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=15
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c
index 7db26f1..76eaf38 100644
--- a/arch/m32r/kernel/irq.c
+++ b/arch/m32r/kernel/irq.c
@@ -40,8 +40,10 @@
 	}
 
 	if (i < NR_IRQS) {
-		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-		action = irq_desc[i].action;
+		struct irq_desc *desc = irq_to_desc(i);
+
+		raw_spin_lock_irqsave(&desc->lock, flags);
+		action = desc->action;
 		if (!action)
 			goto skip;
 		seq_printf(p, "%3d: ",i);
@@ -51,7 +53,7 @@
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->name);
+		seq_printf(p, " %14s", desc->irq_data.chip->name);
 		seq_printf(p, "  %s", action->name);
 
 		for (action=action->next; action; action = action->next)
@@ -59,7 +61,7 @@
 
 		seq_putc(p, '\n');
 skip:
-		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 	return 0;
 }
@@ -78,7 +80,7 @@
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	/* FIXME M32R */
 #endif
-	__do_IRQ(irq);
+	generic_handle_irq(irq);
 	irq_exit();
 	set_irq_regs(old_regs);
 
diff --git a/arch/m32r/platforms/m32104ut/setup.c b/arch/m32r/platforms/m32104ut/setup.c
index 402a59d..4a693d0 100644
--- a/arch/m32r/platforms/m32104ut/setup.c
+++ b/arch/m32r/platforms/m32104ut/setup.c
@@ -39,39 +39,30 @@
 	outl(data, port);
 }
 
-static void mask_and_ack_m32104ut(unsigned int irq)
+static void mask_m32104ut_irq(struct irq_data *data)
 {
-	disable_m32104ut_irq(irq);
+	disable_m32104ut_irq(data->irq);
 }
 
-static void end_m32104ut_irq(unsigned int irq)
+static void unmask_m32104ut_irq(struct irq_data *data)
 {
-	enable_m32104ut_irq(irq);
+	enable_m32104ut_irq(data->irq);
 }
 
-static unsigned int startup_m32104ut_irq(unsigned int irq)
+static void shutdown_m32104ut_irq(struct irq_data *data)
 {
-	enable_m32104ut_irq(irq);
-	return (0);
-}
+	unsigned int irq = data->irq;
+	unsigned long port = irq2port(irq);
 
-static void shutdown_m32104ut_irq(unsigned int irq)
-{
-	unsigned long port;
-
-	port = irq2port(irq);
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip m32104ut_irq_type =
 {
-	.name = "M32104UT-IRQ",
-	.startup = startup_m32104ut_irq,
-	.shutdown = shutdown_m32104ut_irq,
-	.enable = enable_m32104ut_irq,
-	.disable = disable_m32104ut_irq,
-	.ack = mask_and_ack_m32104ut,
-	.end = end_m32104ut_irq
+	.name		= "M32104UT-IRQ",
+	.irq_shutdown	= shutdown_m32104ut_irq,
+	.irq_unmask	= unmask_m32104ut_irq,
+	.irq_mask	= mask_m32104ut_irq,
 };
 
 void __init init_IRQ(void)
@@ -85,36 +76,29 @@
 
 #if defined(CONFIG_SMC91X)
 	/* INT#0: LAN controller on M32104UT-LAN (SMC91C111)*/
-	irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT0].chip = &m32104ut_irq_type;
-	irq_desc[M32R_IRQ_INT0].action = 0;
-	irq_desc[M32R_IRQ_INT0].depth = 1;
-	icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD11; /* "H" level sense */
+	set_irq_chip_and_handler(M32R_IRQ_INT0, &m32104ut_irq_type,
+				 handle_level_irq);
+	/* "H" level sense */
+	icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD11;
 	disable_m32104ut_irq(M32R_IRQ_INT0);
 #endif  /* CONFIG_SMC91X */
 
 	/* MFT2 : system timer */
-	irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_MFT2].chip = &m32104ut_irq_type;
-	irq_desc[M32R_IRQ_MFT2].action = 0;
-	irq_desc[M32R_IRQ_MFT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_MFT2, &m32104ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
 	disable_m32104ut_irq(M32R_IRQ_MFT2);
 
 #ifdef CONFIG_SERIAL_M32R_SIO
 	/* SIO0_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_R].chip = &m32104ut_irq_type;
-	irq_desc[M32R_IRQ_SIO0_R].action = 0;
-	irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &m32104ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_R].icucr = M32R_ICUCR_IEN;
 	disable_m32104ut_irq(M32R_IRQ_SIO0_R);
 
 	/* SIO0_S : uart send data */
-	irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_S].chip = &m32104ut_irq_type;
-	irq_desc[M32R_IRQ_SIO0_S].action = 0;
-	irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &m32104ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_S].icucr = M32R_ICUCR_IEN;
 	disable_m32104ut_irq(M32R_IRQ_SIO0_S);
 #endif /* CONFIG_SERIAL_M32R_SIO */
diff --git a/arch/m32r/platforms/m32700ut/setup.c b/arch/m32r/platforms/m32700ut/setup.c
index 80b1a02..2074bcc 100644
--- a/arch/m32r/platforms/m32700ut/setup.c
+++ b/arch/m32r/platforms/m32700ut/setup.c
@@ -45,39 +45,30 @@
 	outl(data, port);
 }
 
-static void mask_and_ack_m32700ut(unsigned int irq)
+static void mask_m32700ut(struct irq_data *data)
 {
-	disable_m32700ut_irq(irq);
+	disable_m32700ut_irq(data->irq);
 }
 
-static void end_m32700ut_irq(unsigned int irq)
+static void unmask_m32700ut(struct irq_data *data)
 {
-	enable_m32700ut_irq(irq);
+	enable_m32700ut_irq(data->irq);
 }
 
-static unsigned int startup_m32700ut_irq(unsigned int irq)
-{
-	enable_m32700ut_irq(irq);
-	return (0);
-}
-
-static void shutdown_m32700ut_irq(unsigned int irq)
+static void shutdown_m32700ut(struct irq_data *data)
 {
 	unsigned long port;
 
-	port = irq2port(irq);
+	port = irq2port(data->irq);
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip m32700ut_irq_type =
 {
-	.name = "M32700UT-IRQ",
-	.startup = startup_m32700ut_irq,
-	.shutdown = shutdown_m32700ut_irq,
-	.enable = enable_m32700ut_irq,
-	.disable = disable_m32700ut_irq,
-	.ack = mask_and_ack_m32700ut,
-	.end = end_m32700ut_irq
+	.name		= "M32700UT-IRQ",
+	.irq_shutdown	= shutdown_m32700ut,
+	.irq_mask	= mask_m32700ut,
+	.irq_unmask	= unmask_m32700ut
 };
 
 /*
@@ -99,7 +90,6 @@
 	unsigned int pldirq;
 
 	pldirq = irq2pldirq(irq);
-//	disable_m32700ut_irq(M32R_IRQ_INT1);
 	port = pldirq2port(pldirq);
 	data = pld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7;
 	outw(data, port);
@@ -111,50 +101,38 @@
 	unsigned int pldirq;
 
 	pldirq = irq2pldirq(irq);
-//	enable_m32700ut_irq(M32R_IRQ_INT1);
 	port = pldirq2port(pldirq);
 	data = pld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6;
 	outw(data, port);
 }
 
-static void mask_and_ack_m32700ut_pld(unsigned int irq)
+static void mask_m32700ut_pld(struct irq_data *data)
 {
-	disable_m32700ut_pld_irq(irq);
-//	mask_and_ack_m32700ut(M32R_IRQ_INT1);
+	disable_m32700ut_pld_irq(data->irq);
 }
 
-static void end_m32700ut_pld_irq(unsigned int irq)
+static void unmask_m32700ut_pld(struct irq_data *data)
 {
-	enable_m32700ut_pld_irq(irq);
-	end_m32700ut_irq(M32R_IRQ_INT1);
+	enable_m32700ut_pld_irq(data->irq);
+	enable_m32700ut_irq(M32R_IRQ_INT1);
 }
 
-static unsigned int startup_m32700ut_pld_irq(unsigned int irq)
-{
-	enable_m32700ut_pld_irq(irq);
-	return (0);
-}
-
-static void shutdown_m32700ut_pld_irq(unsigned int irq)
+static void shutdown_m32700ut_pld_irq(struct irq_data *data)
 {
 	unsigned long port;
 	unsigned int pldirq;
 
-	pldirq = irq2pldirq(irq);
-//	shutdown_m32700ut_irq(M32R_IRQ_INT1);
+	pldirq = irq2pldirq(data->irq);
 	port = pldirq2port(pldirq);
 	outw(PLD_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip m32700ut_pld_irq_type =
 {
-	.name = "M32700UT-PLD-IRQ",
-	.startup = startup_m32700ut_pld_irq,
-	.shutdown = shutdown_m32700ut_pld_irq,
-	.enable = enable_m32700ut_pld_irq,
-	.disable = disable_m32700ut_pld_irq,
-	.ack = mask_and_ack_m32700ut_pld,
-	.end = end_m32700ut_pld_irq
+	.name		= "M32700UT-PLD-IRQ",
+	.irq_shutdown	= shutdown_m32700ut_pld_irq,
+	.irq_mask	= mask_m32700ut_pld,
+	.irq_unmask	= unmask_m32700ut_pld,
 };
 
 /*
@@ -188,42 +166,33 @@
 	outw(data, port);
 }
 
-static void mask_and_ack_m32700ut_lanpld(unsigned int irq)
+static void mask_m32700ut_lanpld(struct irq_data *data)
 {
-	disable_m32700ut_lanpld_irq(irq);
+	disable_m32700ut_lanpld_irq(data->irq);
 }
 
-static void end_m32700ut_lanpld_irq(unsigned int irq)
+static void unmask_m32700ut_lanpld(struct irq_data *data)
 {
-	enable_m32700ut_lanpld_irq(irq);
-	end_m32700ut_irq(M32R_IRQ_INT0);
+	enable_m32700ut_lanpld_irq(data->irq);
+	enable_m32700ut_irq(M32R_IRQ_INT0);
 }
 
-static unsigned int startup_m32700ut_lanpld_irq(unsigned int irq)
-{
-	enable_m32700ut_lanpld_irq(irq);
-	return (0);
-}
-
-static void shutdown_m32700ut_lanpld_irq(unsigned int irq)
+static void shutdown_m32700ut_lanpld(struct irq_data *data)
 {
 	unsigned long port;
 	unsigned int pldirq;
 
-	pldirq = irq2lanpldirq(irq);
+	pldirq = irq2lanpldirq(data->irq);
 	port = lanpldirq2port(pldirq);
 	outw(PLD_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip m32700ut_lanpld_irq_type =
 {
-	.name = "M32700UT-PLD-LAN-IRQ",
-	.startup = startup_m32700ut_lanpld_irq,
-	.shutdown = shutdown_m32700ut_lanpld_irq,
-	.enable = enable_m32700ut_lanpld_irq,
-	.disable = disable_m32700ut_lanpld_irq,
-	.ack = mask_and_ack_m32700ut_lanpld,
-	.end = end_m32700ut_lanpld_irq
+	.name		= "M32700UT-PLD-LAN-IRQ",
+	.irq_shutdown	= shutdown_m32700ut_lanpld,
+	.irq_mask	= mask_m32700ut_lanpld,
+	.irq_unmask	= unmask_m32700ut_lanpld,
 };
 
 /*
@@ -257,143 +226,110 @@
 	outw(data, port);
 }
 
-static void mask_and_ack_m32700ut_lcdpld(unsigned int irq)
+static void mask_m32700ut_lcdpld(struct irq_data *data)
 {
-	disable_m32700ut_lcdpld_irq(irq);
+	disable_m32700ut_lcdpld_irq(data->irq);
 }
 
-static void end_m32700ut_lcdpld_irq(unsigned int irq)
+static void unmask_m32700ut_lcdpld(struct irq_data *data)
 {
-	enable_m32700ut_lcdpld_irq(irq);
-	end_m32700ut_irq(M32R_IRQ_INT2);
+	enable_m32700ut_lcdpld_irq(data->irq);
+	enable_m32700ut_irq(M32R_IRQ_INT2);
 }
 
-static unsigned int startup_m32700ut_lcdpld_irq(unsigned int irq)
-{
-	enable_m32700ut_lcdpld_irq(irq);
-	return (0);
-}
-
-static void shutdown_m32700ut_lcdpld_irq(unsigned int irq)
+static void shutdown_m32700ut_lcdpld(struct irq_data *data)
 {
 	unsigned long port;
 	unsigned int pldirq;
 
-	pldirq = irq2lcdpldirq(irq);
+	pldirq = irq2lcdpldirq(data->irq);
 	port = lcdpldirq2port(pldirq);
 	outw(PLD_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip m32700ut_lcdpld_irq_type =
 {
-	.name = "M32700UT-PLD-LCD-IRQ",
-	.startup = startup_m32700ut_lcdpld_irq,
-	.shutdown = shutdown_m32700ut_lcdpld_irq,
-	.enable = enable_m32700ut_lcdpld_irq,
-	.disable = disable_m32700ut_lcdpld_irq,
-	.ack = mask_and_ack_m32700ut_lcdpld,
-	.end = end_m32700ut_lcdpld_irq
+	.name		= "M32700UT-PLD-LCD-IRQ",
+	.irq_shutdown	= shutdown_m32700ut_lcdpld,
+	.irq_mask	= mask_m32700ut_lcdpld,
+	.irq_unmask	= unmask_m32700ut_lcdpld,
 };
 
 void __init init_IRQ(void)
 {
 #if defined(CONFIG_SMC91X)
 	/* INT#0: LAN controller on M32700UT-LAN (SMC91C111)*/
-	irq_desc[M32700UT_LAN_IRQ_LAN].status = IRQ_DISABLED;
-	irq_desc[M32700UT_LAN_IRQ_LAN].chip = &m32700ut_lanpld_irq_type;
-	irq_desc[M32700UT_LAN_IRQ_LAN].action = 0;
-	irq_desc[M32700UT_LAN_IRQ_LAN].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(M32700UT_LAN_IRQ_LAN,
+				 &m32700ut_lanpld_irq_type, handle_level_irq);
 	lanpld_icu_data[irq2lanpldirq(M32700UT_LAN_IRQ_LAN)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02;	/* "H" edge sense */
 	disable_m32700ut_lanpld_irq(M32700UT_LAN_IRQ_LAN);
 #endif  /* CONFIG_SMC91X */
 
 	/* MFT2 : system timer */
-	irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_MFT2].chip = &m32700ut_irq_type;
-	irq_desc[M32R_IRQ_MFT2].action = 0;
-	irq_desc[M32R_IRQ_MFT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_MFT2, &m32700ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
 	disable_m32700ut_irq(M32R_IRQ_MFT2);
 
 	/* SIO0 : receive */
-	irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_R].chip = &m32700ut_irq_type;
-	irq_desc[M32R_IRQ_SIO0_R].action = 0;
-	irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &m32700ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_R].icucr = 0;
 	disable_m32700ut_irq(M32R_IRQ_SIO0_R);
 
 	/* SIO0 : send */
-	irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_S].chip = &m32700ut_irq_type;
-	irq_desc[M32R_IRQ_SIO0_S].action = 0;
-	irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &m32700ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_S].icucr = 0;
 	disable_m32700ut_irq(M32R_IRQ_SIO0_S);
 
 	/* SIO1 : receive */
-	irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_R].chip = &m32700ut_irq_type;
-	irq_desc[M32R_IRQ_SIO1_R].action = 0;
-	irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &m32700ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_R].icucr = 0;
 	disable_m32700ut_irq(M32R_IRQ_SIO1_R);
 
 	/* SIO1 : send */
-	irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_S].chip = &m32700ut_irq_type;
-	irq_desc[M32R_IRQ_SIO1_S].action = 0;
-	irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &m32700ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_S].icucr = 0;
 	disable_m32700ut_irq(M32R_IRQ_SIO1_S);
 
 	/* DMA1 : */
-	irq_desc[M32R_IRQ_DMA1].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_DMA1].chip = &m32700ut_irq_type;
-	irq_desc[M32R_IRQ_DMA1].action = 0;
-	irq_desc[M32R_IRQ_DMA1].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_DMA1, &m32700ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_DMA1].icucr = 0;
 	disable_m32700ut_irq(M32R_IRQ_DMA1);
 
 #ifdef CONFIG_SERIAL_M32R_PLDSIO
 	/* INT#1: SIO0 Receive on PLD */
-	irq_desc[PLD_IRQ_SIO0_RCV].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_SIO0_RCV].chip = &m32700ut_pld_irq_type;
-	irq_desc[PLD_IRQ_SIO0_RCV].action = 0;
-	irq_desc[PLD_IRQ_SIO0_RCV].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_SIO0_RCV, &m32700ut_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_RCV)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
 	disable_m32700ut_pld_irq(PLD_IRQ_SIO0_RCV);
 
 	/* INT#1: SIO0 Send on PLD */
-	irq_desc[PLD_IRQ_SIO0_SND].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_SIO0_SND].chip = &m32700ut_pld_irq_type;
-	irq_desc[PLD_IRQ_SIO0_SND].action = 0;
-	irq_desc[PLD_IRQ_SIO0_SND].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_SIO0_SND, &m32700ut_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_SND)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
 	disable_m32700ut_pld_irq(PLD_IRQ_SIO0_SND);
 #endif  /* CONFIG_SERIAL_M32R_PLDSIO */
 
 	/* INT#1: CFC IREQ on PLD */
-	irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFIREQ].chip = &m32700ut_pld_irq_type;
-	irq_desc[PLD_IRQ_CFIREQ].action = 0;
-	irq_desc[PLD_IRQ_CFIREQ].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFIREQ, &m32700ut_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_CFIREQ)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01;	/* 'L' level sense */
 	disable_m32700ut_pld_irq(PLD_IRQ_CFIREQ);
 
 	/* INT#1: CFC Insert on PLD */
-	irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFC_INSERT].chip = &m32700ut_pld_irq_type;
-	irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
-	irq_desc[PLD_IRQ_CFC_INSERT].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFC_INSERT, &m32700ut_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_CFC_INSERT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD00;	/* 'L' edge sense */
 	disable_m32700ut_pld_irq(PLD_IRQ_CFC_INSERT);
 
 	/* INT#1: CFC Eject on PLD */
-	irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFC_EJECT].chip = &m32700ut_pld_irq_type;
-	irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
-	irq_desc[PLD_IRQ_CFC_EJECT].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFC_EJECT, &m32700ut_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_CFC_EJECT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02;	/* 'H' edge sense */
 	disable_m32700ut_pld_irq(PLD_IRQ_CFC_EJECT);
 
@@ -413,13 +349,11 @@
 
 #if defined(CONFIG_USB)
 	outw(USBCR_OTGS, USBCR); 	/* USBCR: non-OTG */
+	set_irq_chip_and_handler(M32700UT_LCD_IRQ_USB_INT1,
+				 &m32700ut_lcdpld_irq_type, handle_level_irq);
 
-    irq_desc[M32700UT_LCD_IRQ_USB_INT1].status = IRQ_DISABLED;
-    irq_desc[M32700UT_LCD_IRQ_USB_INT1].chip = &m32700ut_lcdpld_irq_type;
-    irq_desc[M32700UT_LCD_IRQ_USB_INT1].action = 0;
-    irq_desc[M32700UT_LCD_IRQ_USB_INT1].depth = 1;
-    lcdpld_icu_data[irq2lcdpldirq(M32700UT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01;	/* "L" level sense */
-    disable_m32700ut_lcdpld_irq(M32700UT_LCD_IRQ_USB_INT1);
+	lcdpld_icu_data[irq2lcdpldirq(M32700UT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01;	/* "L" level sense */
+	disable_m32700ut_lcdpld_irq(M32700UT_LCD_IRQ_USB_INT1);
 #endif
 	/*
 	 * INT2# is used for BAT, USB, AUDIO
@@ -432,10 +366,8 @@
 	/*
 	 * INT3# is used for AR
 	 */
-	irq_desc[M32R_IRQ_INT3].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT3].chip = &m32700ut_irq_type;
-	irq_desc[M32R_IRQ_INT3].action = 0;
-	irq_desc[M32R_IRQ_INT3].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT3, &m32700ut_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
 	disable_m32700ut_irq(M32R_IRQ_INT3);
 #endif	/* CONFIG_VIDEO_M32R_AR */
diff --git a/arch/m32r/platforms/mappi/setup.c b/arch/m32r/platforms/mappi/setup.c
index ea00c84..cdd8c45 100644
--- a/arch/m32r/platforms/mappi/setup.c
+++ b/arch/m32r/platforms/mappi/setup.c
@@ -38,40 +38,30 @@
 	outl(data, port);
 }
 
-static void mask_and_ack_mappi(unsigned int irq)
+static void mask_mappi(struct irq_data *data)
 {
-	disable_mappi_irq(irq);
+	disable_mappi_irq(data->irq);
 }
 
-static void end_mappi_irq(unsigned int irq)
+static void unmask_mappi(struct irq_data *data)
 {
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		enable_mappi_irq(irq);
+	enable_mappi_irq(data->irq);
 }
 
-static unsigned int startup_mappi_irq(unsigned int irq)
-{
-	enable_mappi_irq(irq);
-	return (0);
-}
-
-static void shutdown_mappi_irq(unsigned int irq)
+static void shutdown_mappi(struct irq_data *data)
 {
 	unsigned long port;
 
-	port = irq2port(irq);
+	port = irq2port(data->irq);
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip mappi_irq_type =
 {
-	.name = "MAPPI-IRQ",
-	.startup = startup_mappi_irq,
-	.shutdown = shutdown_mappi_irq,
-	.enable = enable_mappi_irq,
-	.disable = disable_mappi_irq,
-	.ack = mask_and_ack_mappi,
-	.end = end_mappi_irq
+	.name		= "MAPPI-IRQ",
+	.irq_shutdown	= shutdown_mappi,
+	.irq_mask	= mask_mappi,
+	.irq_unmask	= unmask_mappi,
 };
 
 void __init init_IRQ(void)
@@ -85,70 +75,54 @@
 
 #ifdef CONFIG_NE2000
 	/* INT0 : LAN controller (RTL8019AS) */
-	irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT0].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_INT0].action = NULL;
-	irq_desc[M32R_IRQ_INT0].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT0, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD11;
 	disable_mappi_irq(M32R_IRQ_INT0);
 #endif /* CONFIG_M32R_NE2000 */
 
 	/* MFT2 : system timer */
-	irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_MFT2].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_MFT2].action = NULL;
-	irq_desc[M32R_IRQ_MFT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_MFT2, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
 	disable_mappi_irq(M32R_IRQ_MFT2);
 
 #ifdef CONFIG_SERIAL_M32R_SIO
 	/* SIO0_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_R].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_SIO0_R].action = NULL;
-	irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_R].icucr = 0;
 	disable_mappi_irq(M32R_IRQ_SIO0_R);
 
 	/* SIO0_S : uart send data */
-	irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_S].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_SIO0_S].action = NULL;
-	irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_S].icucr = 0;
 	disable_mappi_irq(M32R_IRQ_SIO0_S);
 
 	/* SIO1_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_R].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_SIO1_R].action = NULL;
-	irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_R].icucr = 0;
 	disable_mappi_irq(M32R_IRQ_SIO1_R);
 
 	/* SIO1_S : uart send data */
-	irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_S].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_SIO1_S].action = NULL;
-	irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_S].icucr = 0;
 	disable_mappi_irq(M32R_IRQ_SIO1_S);
 #endif /* CONFIG_SERIAL_M32R_SIO */
 
 #if defined(CONFIG_M32R_PCC)
 	/* INT1 : pccard0 interrupt */
-	irq_desc[M32R_IRQ_INT1].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT1].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_INT1].action = NULL;
-	irq_desc[M32R_IRQ_INT1].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT1, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD00;
 	disable_mappi_irq(M32R_IRQ_INT1);
 
 	/* INT2 : pccard1 interrupt */
-	irq_desc[M32R_IRQ_INT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT2].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_INT2].action = NULL;
-	irq_desc[M32R_IRQ_INT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT2, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT2].icucr = M32R_ICUCR_IEN | M32R_ICUCR_ISMOD00;
 	disable_mappi_irq(M32R_IRQ_INT2);
 #endif /* CONFIG_M32RPCC */
diff --git a/arch/m32r/platforms/mappi2/setup.c b/arch/m32r/platforms/mappi2/setup.c
index c049376..9117c30 100644
--- a/arch/m32r/platforms/mappi2/setup.c
+++ b/arch/m32r/platforms/mappi2/setup.c
@@ -46,126 +46,97 @@
 	outl(data, port);
 }
 
-static void mask_and_ack_mappi2(unsigned int irq)
+static void mask_mappi2(struct irq_data *data)
 {
-	disable_mappi2_irq(irq);
+	disable_mappi2_irq(data->irq);
 }
 
-static void end_mappi2_irq(unsigned int irq)
+static void unmask_mappi2(struct irq_data *data)
 {
-	enable_mappi2_irq(irq);
+	enable_mappi2_irq(data->irq);
 }
 
-static unsigned int startup_mappi2_irq(unsigned int irq)
-{
-	enable_mappi2_irq(irq);
-	return (0);
-}
-
-static void shutdown_mappi2_irq(unsigned int irq)
+static void shutdown_mappi2(struct irq_data *data)
 {
 	unsigned long port;
 
-	port = irq2port(irq);
+	port = irq2port(data->irq);
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip mappi2_irq_type =
 {
-	.name = "MAPPI2-IRQ",
-	.startup = startup_mappi2_irq,
-	.shutdown = shutdown_mappi2_irq,
-	.enable = enable_mappi2_irq,
-	.disable = disable_mappi2_irq,
-	.ack = mask_and_ack_mappi2,
-	.end = end_mappi2_irq
+	.name		= "MAPPI2-IRQ",
+	.irq_shutdown	= shutdown_mappi2,
+	.irq_mask	= mask_mappi2,
+	.irq_unmask	= unmask_mappi2,
 };
 
 void __init init_IRQ(void)
 {
 #if defined(CONFIG_SMC91X)
 	/* INT0 : LAN controller (SMC91111) */
-	irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT0].chip = &mappi2_irq_type;
-	irq_desc[M32R_IRQ_INT0].action = 0;
-	irq_desc[M32R_IRQ_INT0].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT0, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
 	disable_mappi2_irq(M32R_IRQ_INT0);
 #endif  /* CONFIG_SMC91X */
 
 	/* MFT2 : system timer */
-	irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_MFT2].chip = &mappi2_irq_type;
-	irq_desc[M32R_IRQ_MFT2].action = 0;
-	irq_desc[M32R_IRQ_MFT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_MFT2, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
 	disable_mappi2_irq(M32R_IRQ_MFT2);
 
 #ifdef CONFIG_SERIAL_M32R_SIO
 	/* SIO0_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_R].chip = &mappi2_irq_type;
-	irq_desc[M32R_IRQ_SIO0_R].action = 0;
-	irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_R].icucr = 0;
 	disable_mappi2_irq(M32R_IRQ_SIO0_R);
 
 	/* SIO0_S : uart send data */
-	irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_S].chip = &mappi2_irq_type;
-	irq_desc[M32R_IRQ_SIO0_S].action = 0;
-	irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_S].icucr = 0;
 	disable_mappi2_irq(M32R_IRQ_SIO0_S);
 	/* SIO1_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_R].chip = &mappi2_irq_type;
-	irq_desc[M32R_IRQ_SIO1_R].action = 0;
-	irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_R].icucr = 0;
 	disable_mappi2_irq(M32R_IRQ_SIO1_R);
 
 	/* SIO1_S : uart send data */
-	irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_S].chip = &mappi2_irq_type;
-	irq_desc[M32R_IRQ_SIO1_S].action = 0;
-	irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_S].icucr = 0;
 	disable_mappi2_irq(M32R_IRQ_SIO1_S);
 #endif  /* CONFIG_M32R_USE_DBG_CONSOLE */
 
 #if defined(CONFIG_USB)
 	/* INT1 : USB Host controller interrupt */
-	irq_desc[M32R_IRQ_INT1].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT1].chip = &mappi2_irq_type;
-	irq_desc[M32R_IRQ_INT1].action = 0;
-	irq_desc[M32R_IRQ_INT1].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT1, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_ISMOD01;
 	disable_mappi2_irq(M32R_IRQ_INT1);
 #endif /* CONFIG_USB */
 
 	/* ICUCR40: CFC IREQ */
-	irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFIREQ].chip = &mappi2_irq_type;
-	irq_desc[PLD_IRQ_CFIREQ].action = 0;
-	irq_desc[PLD_IRQ_CFIREQ].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFIREQ, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[PLD_IRQ_CFIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01;
 	disable_mappi2_irq(PLD_IRQ_CFIREQ);
 
 #if defined(CONFIG_M32R_CFC)
 	/* ICUCR41: CFC Insert */
-	irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFC_INSERT].chip = &mappi2_irq_type;
-	irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
-	irq_desc[PLD_IRQ_CFC_INSERT].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFC_INSERT, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[PLD_IRQ_CFC_INSERT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD00;
 	disable_mappi2_irq(PLD_IRQ_CFC_INSERT);
 
 	/* ICUCR42: CFC Eject */
-	irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFC_EJECT].chip = &mappi2_irq_type;
-	irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
-	irq_desc[PLD_IRQ_CFC_EJECT].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFC_EJECT, &mappi2_irq_type,
+				 handle_level_irq);
 	icu_data[PLD_IRQ_CFC_EJECT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
 	disable_mappi2_irq(PLD_IRQ_CFC_EJECT);
 #endif /* CONFIG_MAPPI2_CFC */
diff --git a/arch/m32r/platforms/mappi3/setup.c b/arch/m32r/platforms/mappi3/setup.c
index 882de25..b44f5de 100644
--- a/arch/m32r/platforms/mappi3/setup.c
+++ b/arch/m32r/platforms/mappi3/setup.c
@@ -46,128 +46,98 @@
 	outl(data, port);
 }
 
-static void mask_and_ack_mappi3(unsigned int irq)
+static void mask_mappi3(struct irq_data *data)
 {
-	disable_mappi3_irq(irq);
+	disable_mappi3_irq(data->irq);
 }
 
-static void end_mappi3_irq(unsigned int irq)
+static void unmask_mappi3(struct irq_data *data)
 {
-	enable_mappi3_irq(irq);
+	enable_mappi3_irq(data->irq);
 }
 
-static unsigned int startup_mappi3_irq(unsigned int irq)
-{
-	enable_mappi3_irq(irq);
-	return (0);
-}
-
-static void shutdown_mappi3_irq(unsigned int irq)
+static void shutdown_mappi3(struct irq_data *data)
 {
 	unsigned long port;
 
-	port = irq2port(irq);
+	port = irq2port(data->irq);
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
-static struct irq_chip mappi3_irq_type =
-{
-	.name = "MAPPI3-IRQ",
-	.startup = startup_mappi3_irq,
-	.shutdown = shutdown_mappi3_irq,
-	.enable = enable_mappi3_irq,
-	.disable = disable_mappi3_irq,
-	.ack = mask_and_ack_mappi3,
-	.end = end_mappi3_irq
+static struct irq_chip mappi3_irq_type = {
+	.name		= "MAPPI3-IRQ",
+	.irq_shutdown	= shutdown_mappi3,
+	.irq_mask	= mask_mappi3,
+	.irq_unmask	= unmask_mappi3,
 };
 
 void __init init_IRQ(void)
 {
 #if defined(CONFIG_SMC91X)
 	/* INT0 : LAN controller (SMC91111) */
-	irq_desc[M32R_IRQ_INT0].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT0].chip = &mappi3_irq_type;
-	irq_desc[M32R_IRQ_INT0].action = 0;
-	irq_desc[M32R_IRQ_INT0].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT0, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT0].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
 	disable_mappi3_irq(M32R_IRQ_INT0);
 #endif  /* CONFIG_SMC91X */
 
 	/* MFT2 : system timer */
-	irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_MFT2].chip = &mappi3_irq_type;
-	irq_desc[M32R_IRQ_MFT2].action = 0;
-	irq_desc[M32R_IRQ_MFT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_MFT2, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
 	disable_mappi3_irq(M32R_IRQ_MFT2);
 
 #ifdef CONFIG_SERIAL_M32R_SIO
 	/* SIO0_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_R].chip = &mappi3_irq_type;
-	irq_desc[M32R_IRQ_SIO0_R].action = 0;
-	irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_R].icucr = 0;
 	disable_mappi3_irq(M32R_IRQ_SIO0_R);
 
 	/* SIO0_S : uart send data */
-	irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_S].chip = &mappi3_irq_type;
-	irq_desc[M32R_IRQ_SIO0_S].action = 0;
-	irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_S].icucr = 0;
 	disable_mappi3_irq(M32R_IRQ_SIO0_S);
 	/* SIO1_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_R].chip = &mappi3_irq_type;
-	irq_desc[M32R_IRQ_SIO1_R].action = 0;
-	irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_R].icucr = 0;
 	disable_mappi3_irq(M32R_IRQ_SIO1_R);
 
 	/* SIO1_S : uart send data */
-	irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_S].chip = &mappi3_irq_type;
-	irq_desc[M32R_IRQ_SIO1_S].action = 0;
-	irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_S].icucr = 0;
 	disable_mappi3_irq(M32R_IRQ_SIO1_S);
 #endif  /* CONFIG_M32R_USE_DBG_CONSOLE */
 
 #if defined(CONFIG_USB)
 	/* INT1 : USB Host controller interrupt */
-	irq_desc[M32R_IRQ_INT1].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT1].chip = &mappi3_irq_type;
-	irq_desc[M32R_IRQ_INT1].action = 0;
-	irq_desc[M32R_IRQ_INT1].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT1, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT1].icucr = M32R_ICUCR_ISMOD01;
 	disable_mappi3_irq(M32R_IRQ_INT1);
 #endif /* CONFIG_USB */
 
 	/* CFC IREQ */
-	irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFIREQ].chip = &mappi3_irq_type;
-	irq_desc[PLD_IRQ_CFIREQ].action = 0;
-	irq_desc[PLD_IRQ_CFIREQ].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFIREQ, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[PLD_IRQ_CFIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD01;
 	disable_mappi3_irq(PLD_IRQ_CFIREQ);
 
 #if defined(CONFIG_M32R_CFC)
 	/* ICUCR41: CFC Insert & eject */
-	irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFC_INSERT].chip = &mappi3_irq_type;
-	irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
-	irq_desc[PLD_IRQ_CFC_INSERT].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFC_INSERT, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[PLD_IRQ_CFC_INSERT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD00;
 	disable_mappi3_irq(PLD_IRQ_CFC_INSERT);
 
 #endif /* CONFIG_M32R_CFC */
 
 	/* IDE IREQ */
-	irq_desc[PLD_IRQ_IDEIREQ].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_IDEIREQ].chip = &mappi3_irq_type;
-	irq_desc[PLD_IRQ_IDEIREQ].action = 0;
-	irq_desc[PLD_IRQ_IDEIREQ].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_IDEIREQ, &mappi3_irq_type,
+				 handle_level_irq);
 	icu_data[PLD_IRQ_IDEIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
 	disable_mappi3_irq(PLD_IRQ_IDEIREQ);
 
diff --git a/arch/m32r/platforms/oaks32r/setup.c b/arch/m32r/platforms/oaks32r/setup.c
index d11d93b..19a02db 100644
--- a/arch/m32r/platforms/oaks32r/setup.c
+++ b/arch/m32r/platforms/oaks32r/setup.c
@@ -37,39 +37,30 @@
 	outl(data, port);
 }
 
-static void mask_and_ack_mappi(unsigned int irq)
+static void mask_oaks32r(struct irq_data *data)
 {
-	disable_oaks32r_irq(irq);
+	disable_oaks32r_irq(data->irq);
 }
 
-static void end_oaks32r_irq(unsigned int irq)
+static void unmask_oaks32r(struct irq_data *data)
 {
-	enable_oaks32r_irq(irq);
+	enable_oaks32r_irq(data->irq);
 }
 
-static unsigned int startup_oaks32r_irq(unsigned int irq)
-{
-	enable_oaks32r_irq(irq);
-	return (0);
-}
-
-static void shutdown_oaks32r_irq(unsigned int irq)
+static void shutdown_oaks32r(struct irq_data *data)
 {
 	unsigned long port;
 
-	port = irq2port(irq);
+	port = irq2port(data->irq);
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip oaks32r_irq_type =
 {
-	.name = "OAKS32R-IRQ",
-	.startup = startup_oaks32r_irq,
-	.shutdown = shutdown_oaks32r_irq,
-	.enable = enable_oaks32r_irq,
-	.disable = disable_oaks32r_irq,
-	.ack = mask_and_ack_mappi,
-	.end = end_oaks32r_irq
+	.name		= "OAKS32R-IRQ",
+	.irq_shutdown	= shutdown_oaks32r,
+	.irq_mask	= mask_oaks32r,
+	.irq_unmask	= unmask_oaks32r,
 };
 
 void __init init_IRQ(void)
@@ -83,52 +74,40 @@
 
 #ifdef CONFIG_NE2000
 	/* INT3 : LAN controller (RTL8019AS) */
-	irq_desc[M32R_IRQ_INT3].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT3].chip = &oaks32r_irq_type;
-	irq_desc[M32R_IRQ_INT3].action = 0;
-	irq_desc[M32R_IRQ_INT3].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT3, &oaks32r_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
 	disable_oaks32r_irq(M32R_IRQ_INT3);
 #endif /* CONFIG_M32R_NE2000 */
 
 	/* MFT2 : system timer */
-	irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_MFT2].chip = &oaks32r_irq_type;
-	irq_desc[M32R_IRQ_MFT2].action = 0;
-	irq_desc[M32R_IRQ_MFT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_MFT2, &oaks32r_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
 	disable_oaks32r_irq(M32R_IRQ_MFT2);
 
 #ifdef CONFIG_SERIAL_M32R_SIO
 	/* SIO0_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_R].chip = &oaks32r_irq_type;
-	irq_desc[M32R_IRQ_SIO0_R].action = 0;
-	irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &oaks32r_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_R].icucr = 0;
 	disable_oaks32r_irq(M32R_IRQ_SIO0_R);
 
 	/* SIO0_S : uart send data */
-	irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_S].chip = &oaks32r_irq_type;
-	irq_desc[M32R_IRQ_SIO0_S].action = 0;
-	irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &oaks32r_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_S].icucr = 0;
 	disable_oaks32r_irq(M32R_IRQ_SIO0_S);
 
 	/* SIO1_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_R].chip = &oaks32r_irq_type;
-	irq_desc[M32R_IRQ_SIO1_R].action = 0;
-	irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &oaks32r_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_R].icucr = 0;
 	disable_oaks32r_irq(M32R_IRQ_SIO1_R);
 
 	/* SIO1_S : uart send data */
-	irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_S].chip = &oaks32r_irq_type;
-	irq_desc[M32R_IRQ_SIO1_S].action = 0;
-	irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &oaks32r_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_S].icucr = 0;
 	disable_oaks32r_irq(M32R_IRQ_SIO1_S);
 #endif /* CONFIG_SERIAL_M32R_SIO */
diff --git a/arch/m32r/platforms/opsput/setup.c b/arch/m32r/platforms/opsput/setup.c
index 5f3402a..1273154 100644
--- a/arch/m32r/platforms/opsput/setup.c
+++ b/arch/m32r/platforms/opsput/setup.c
@@ -46,39 +46,30 @@
 	outl(data, port);
 }
 
-static void mask_and_ack_opsput(unsigned int irq)
+static void mask_opsput(struct irq_data *data)
 {
-	disable_opsput_irq(irq);
+	disable_opsput_irq(data->irq);
 }
 
-static void end_opsput_irq(unsigned int irq)
+static void unmask_opsput(struct irq_data *data)
 {
-	enable_opsput_irq(irq);
+	enable_opsput_irq(data->irq);
 }
 
-static unsigned int startup_opsput_irq(unsigned int irq)
-{
-	enable_opsput_irq(irq);
-	return (0);
-}
-
-static void shutdown_opsput_irq(unsigned int irq)
+static void shutdown_opsput(struct irq_data *data)
 {
 	unsigned long port;
 
-	port = irq2port(irq);
+	port = irq2port(data->irq);
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip opsput_irq_type =
 {
-	.name = "OPSPUT-IRQ",
-	.startup = startup_opsput_irq,
-	.shutdown = shutdown_opsput_irq,
-	.enable = enable_opsput_irq,
-	.disable = disable_opsput_irq,
-	.ack = mask_and_ack_opsput,
-	.end = end_opsput_irq
+	.name		= "OPSPUT-IRQ",
+	.irq_shutdown	= shutdown_opsput,
+	.irq_mask	= mask_opsput,
+	.irq_unmask	= unmask_opsput,
 };
 
 /*
@@ -100,7 +91,6 @@
 	unsigned int pldirq;
 
 	pldirq = irq2pldirq(irq);
-//	disable_opsput_irq(M32R_IRQ_INT1);
 	port = pldirq2port(pldirq);
 	data = pld_icu_data[pldirq].icucr|PLD_ICUCR_ILEVEL7;
 	outw(data, port);
@@ -112,50 +102,38 @@
 	unsigned int pldirq;
 
 	pldirq = irq2pldirq(irq);
-//	enable_opsput_irq(M32R_IRQ_INT1);
 	port = pldirq2port(pldirq);
 	data = pld_icu_data[pldirq].icucr|PLD_ICUCR_IEN|PLD_ICUCR_ILEVEL6;
 	outw(data, port);
 }
 
-static void mask_and_ack_opsput_pld(unsigned int irq)
+static void mask_opsput_pld(struct irq_data *data)
 {
-	disable_opsput_pld_irq(irq);
-//	mask_and_ack_opsput(M32R_IRQ_INT1);
+	disable_opsput_pld_irq(data->irq);
 }
 
-static void end_opsput_pld_irq(unsigned int irq)
+static void unmask_opsput_pld(struct irq_data *data)
 {
-	enable_opsput_pld_irq(irq);
-	end_opsput_irq(M32R_IRQ_INT1);
+	enable_opsput_pld_irq(data->irq);
+	enable_opsput_irq(M32R_IRQ_INT1);
 }
 
-static unsigned int startup_opsput_pld_irq(unsigned int irq)
-{
-	enable_opsput_pld_irq(irq);
-	return (0);
-}
-
-static void shutdown_opsput_pld_irq(unsigned int irq)
+static void shutdown_opsput_pld(struct irq_data *data)
 {
 	unsigned long port;
 	unsigned int pldirq;
 
-	pldirq = irq2pldirq(irq);
-//	shutdown_opsput_irq(M32R_IRQ_INT1);
+	pldirq = irq2pldirq(data->irq);
 	port = pldirq2port(pldirq);
 	outw(PLD_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip opsput_pld_irq_type =
 {
-	.name = "OPSPUT-PLD-IRQ",
-	.startup = startup_opsput_pld_irq,
-	.shutdown = shutdown_opsput_pld_irq,
-	.enable = enable_opsput_pld_irq,
-	.disable = disable_opsput_pld_irq,
-	.ack = mask_and_ack_opsput_pld,
-	.end = end_opsput_pld_irq
+	.name		= "OPSPUT-PLD-IRQ",
+	.irq_shutdown	= shutdown_opsput_pld,
+	.irq_mask	= mask_opsput_pld,
+	.irq_unmask	= unmask_opsput_pld,
 };
 
 /*
@@ -189,42 +167,33 @@
 	outw(data, port);
 }
 
-static void mask_and_ack_opsput_lanpld(unsigned int irq)
+static void mask_opsput_lanpld(struct irq_data *data)
 {
-	disable_opsput_lanpld_irq(irq);
+	disable_opsput_lanpld_irq(data->irq);
 }
 
-static void end_opsput_lanpld_irq(unsigned int irq)
+static void unmask_opsput_lanpld(struct irq_data *data)
 {
-	enable_opsput_lanpld_irq(irq);
-	end_opsput_irq(M32R_IRQ_INT0);
+	enable_opsput_lanpld_irq(data->irq);
+	enable_opsput_irq(M32R_IRQ_INT0);
 }
 
-static unsigned int startup_opsput_lanpld_irq(unsigned int irq)
-{
-	enable_opsput_lanpld_irq(irq);
-	return (0);
-}
-
-static void shutdown_opsput_lanpld_irq(unsigned int irq)
+static void shutdown_opsput_lanpld(struct irq_data *data)
 {
 	unsigned long port;
 	unsigned int pldirq;
 
-	pldirq = irq2lanpldirq(irq);
+	pldirq = irq2lanpldirq(data->irq);
 	port = lanpldirq2port(pldirq);
 	outw(PLD_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip opsput_lanpld_irq_type =
 {
-	.name = "OPSPUT-PLD-LAN-IRQ",
-	.startup = startup_opsput_lanpld_irq,
-	.shutdown = shutdown_opsput_lanpld_irq,
-	.enable = enable_opsput_lanpld_irq,
-	.disable = disable_opsput_lanpld_irq,
-	.ack = mask_and_ack_opsput_lanpld,
-	.end = end_opsput_lanpld_irq
+	.name		= "OPSPUT-PLD-LAN-IRQ",
+	.irq_shutdown	= shutdown_opsput_lanpld,
+	.irq_mask	= mask_opsput_lanpld,
+	.irq_unmask	= unmask_opsput_lanpld,
 };
 
 /*
@@ -258,143 +227,109 @@
 	outw(data, port);
 }
 
-static void mask_and_ack_opsput_lcdpld(unsigned int irq)
+static void mask_opsput_lcdpld(struct irq_data *data)
 {
-	disable_opsput_lcdpld_irq(irq);
+	disable_opsput_lcdpld_irq(data->irq);
 }
 
-static void end_opsput_lcdpld_irq(unsigned int irq)
+static void unmask_opsput_lcdpld(struct irq_data *data)
 {
-	enable_opsput_lcdpld_irq(irq);
-	end_opsput_irq(M32R_IRQ_INT2);
+	enable_opsput_lcdpld_irq(data->irq);
+	enable_opsput_irq(M32R_IRQ_INT2);
 }
 
-static unsigned int startup_opsput_lcdpld_irq(unsigned int irq)
-{
-	enable_opsput_lcdpld_irq(irq);
-	return (0);
-}
-
-static void shutdown_opsput_lcdpld_irq(unsigned int irq)
+static void shutdown_opsput_lcdpld(struct irq_data *data)
 {
 	unsigned long port;
 	unsigned int pldirq;
 
-	pldirq = irq2lcdpldirq(irq);
+	pldirq = irq2lcdpldirq(data->irq);
 	port = lcdpldirq2port(pldirq);
 	outw(PLD_ICUCR_ILEVEL7, port);
 }
 
-static struct irq_chip opsput_lcdpld_irq_type =
-{
-	"OPSPUT-PLD-LCD-IRQ",
-	startup_opsput_lcdpld_irq,
-	shutdown_opsput_lcdpld_irq,
-	enable_opsput_lcdpld_irq,
-	disable_opsput_lcdpld_irq,
-	mask_and_ack_opsput_lcdpld,
-	end_opsput_lcdpld_irq
+static struct irq_chip opsput_lcdpld_irq_type = {
+	.name		= "OPSPUT-PLD-LCD-IRQ",
+	.irq_shutdown	= shutdown_opsput_lcdpld,
+	.irq_mask	= mask_opsput_lcdpld,
+	.irq_unmask	= unmask_opsput_lcdpld,
 };
 
 void __init init_IRQ(void)
 {
 #if defined(CONFIG_SMC91X)
 	/* INT#0: LAN controller on OPSPUT-LAN (SMC91C111)*/
-	irq_desc[OPSPUT_LAN_IRQ_LAN].status = IRQ_DISABLED;
-	irq_desc[OPSPUT_LAN_IRQ_LAN].chip = &opsput_lanpld_irq_type;
-	irq_desc[OPSPUT_LAN_IRQ_LAN].action = 0;
-	irq_desc[OPSPUT_LAN_IRQ_LAN].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(OPSPUT_LAN_IRQ_LAN, &opsput_lanpld_irq_type,
+				 handle_level_irq);
 	lanpld_icu_data[irq2lanpldirq(OPSPUT_LAN_IRQ_LAN)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02;	/* "H" edge sense */
 	disable_opsput_lanpld_irq(OPSPUT_LAN_IRQ_LAN);
 #endif  /* CONFIG_SMC91X */
 
 	/* MFT2 : system timer */
-	irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_MFT2].chip = &opsput_irq_type;
-	irq_desc[M32R_IRQ_MFT2].action = 0;
-	irq_desc[M32R_IRQ_MFT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_MFT2, &opsput_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
 	disable_opsput_irq(M32R_IRQ_MFT2);
 
 	/* SIO0 : receive */
-	irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_R].chip = &opsput_irq_type;
-	irq_desc[M32R_IRQ_SIO0_R].action = 0;
-	irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &opsput_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_R].icucr = 0;
 	disable_opsput_irq(M32R_IRQ_SIO0_R);
 
 	/* SIO0 : send */
-	irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_S].chip = &opsput_irq_type;
-	irq_desc[M32R_IRQ_SIO0_S].action = 0;
-	irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &opsput_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_S].icucr = 0;
 	disable_opsput_irq(M32R_IRQ_SIO0_S);
 
 	/* SIO1 : receive */
-	irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_R].chip = &opsput_irq_type;
-	irq_desc[M32R_IRQ_SIO1_R].action = 0;
-	irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &opsput_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_R].icucr = 0;
 	disable_opsput_irq(M32R_IRQ_SIO1_R);
 
 	/* SIO1 : send */
-	irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_S].chip = &opsput_irq_type;
-	irq_desc[M32R_IRQ_SIO1_S].action = 0;
-	irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &opsput_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_S].icucr = 0;
 	disable_opsput_irq(M32R_IRQ_SIO1_S);
 
 	/* DMA1 : */
-	irq_desc[M32R_IRQ_DMA1].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_DMA1].chip = &opsput_irq_type;
-	irq_desc[M32R_IRQ_DMA1].action = 0;
-	irq_desc[M32R_IRQ_DMA1].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_DMA1, &opsput_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_DMA1].icucr = 0;
 	disable_opsput_irq(M32R_IRQ_DMA1);
 
 #ifdef CONFIG_SERIAL_M32R_PLDSIO
 	/* INT#1: SIO0 Receive on PLD */
-	irq_desc[PLD_IRQ_SIO0_RCV].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_SIO0_RCV].chip = &opsput_pld_irq_type;
-	irq_desc[PLD_IRQ_SIO0_RCV].action = 0;
-	irq_desc[PLD_IRQ_SIO0_RCV].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_SIO0_RCV, &opsput_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_RCV)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
 	disable_opsput_pld_irq(PLD_IRQ_SIO0_RCV);
 
 	/* INT#1: SIO0 Send on PLD */
-	irq_desc[PLD_IRQ_SIO0_SND].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_SIO0_SND].chip = &opsput_pld_irq_type;
-	irq_desc[PLD_IRQ_SIO0_SND].action = 0;
-	irq_desc[PLD_IRQ_SIO0_SND].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_SIO0_SND, &opsput_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_SIO0_SND)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD03;
 	disable_opsput_pld_irq(PLD_IRQ_SIO0_SND);
 #endif  /* CONFIG_SERIAL_M32R_PLDSIO */
 
 	/* INT#1: CFC IREQ on PLD */
-	irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFIREQ].chip = &opsput_pld_irq_type;
-	irq_desc[PLD_IRQ_CFIREQ].action = 0;
-	irq_desc[PLD_IRQ_CFIREQ].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFIREQ, &opsput_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_CFIREQ)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01;	/* 'L' level sense */
 	disable_opsput_pld_irq(PLD_IRQ_CFIREQ);
 
 	/* INT#1: CFC Insert on PLD */
-	irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFC_INSERT].chip = &opsput_pld_irq_type;
-	irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
-	irq_desc[PLD_IRQ_CFC_INSERT].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFC_INSERT, &opsput_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_CFC_INSERT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD00;	/* 'L' edge sense */
 	disable_opsput_pld_irq(PLD_IRQ_CFC_INSERT);
 
 	/* INT#1: CFC Eject on PLD */
-	irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFC_EJECT].chip = &opsput_pld_irq_type;
-	irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
-	irq_desc[PLD_IRQ_CFC_EJECT].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_CFC_EJECT, &opsput_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_CFC_EJECT)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD02;	/* 'H' edge sense */
 	disable_opsput_pld_irq(PLD_IRQ_CFC_EJECT);
 
@@ -413,14 +348,11 @@
 	enable_opsput_irq(M32R_IRQ_INT1);
 
 #if defined(CONFIG_USB)
-	outw(USBCR_OTGS, USBCR); 	/* USBCR: non-OTG */
-
-    irq_desc[OPSPUT_LCD_IRQ_USB_INT1].status = IRQ_DISABLED;
-    irq_desc[OPSPUT_LCD_IRQ_USB_INT1].chip = &opsput_lcdpld_irq_type;
-    irq_desc[OPSPUT_LCD_IRQ_USB_INT1].action = 0;
-    irq_desc[OPSPUT_LCD_IRQ_USB_INT1].depth = 1;
-    lcdpld_icu_data[irq2lcdpldirq(OPSPUT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01;	/* "L" level sense */
-    disable_opsput_lcdpld_irq(OPSPUT_LCD_IRQ_USB_INT1);
+	outw(USBCR_OTGS, USBCR);	/* USBCR: non-OTG */
+	set_irq_chip_and_handler(OPSPUT_LCD_IRQ_USB_INT1,
+				 &opsput_lcdpld_irq_type, handle_level_irq);
+	lcdpld_icu_data[irq2lcdpldirq(OPSPUT_LCD_IRQ_USB_INT1)].icucr = PLD_ICUCR_IEN|PLD_ICUCR_ISMOD01;	/* "L" level sense */
+	disable_opsput_lcdpld_irq(OPSPUT_LCD_IRQ_USB_INT1);
 #endif
 	/*
 	 * INT2# is used for BAT, USB, AUDIO
@@ -433,10 +365,8 @@
 	/*
 	 * INT3# is used for AR
 	 */
-	irq_desc[M32R_IRQ_INT3].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_INT3].chip = &opsput_irq_type;
-	irq_desc[M32R_IRQ_INT3].action = 0;
-	irq_desc[M32R_IRQ_INT3].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_INT3, &opsput_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_INT3].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
 	disable_opsput_irq(M32R_IRQ_INT3);
 #endif /* CONFIG_VIDEO_M32R_AR */
diff --git a/arch/m32r/platforms/usrv/setup.c b/arch/m32r/platforms/usrv/setup.c
index 1beac7a..f3cff26 100644
--- a/arch/m32r/platforms/usrv/setup.c
+++ b/arch/m32r/platforms/usrv/setup.c
@@ -37,39 +37,30 @@
 	outl(data, port);
 }
 
-static void mask_and_ack_mappi(unsigned int irq)
+static void mask_mappi(struct irq_data *data)
 {
-	disable_mappi_irq(irq);
+	disable_mappi_irq(data->irq);
 }
 
-static void end_mappi_irq(unsigned int irq)
+static void unmask_mappi(struct irq_data *data)
 {
-	enable_mappi_irq(irq);
+	enable_mappi_irq(data->irq);
 }
 
-static unsigned int startup_mappi_irq(unsigned int irq)
-{
-	enable_mappi_irq(irq);
-	return 0;
-}
-
-static void shutdown_mappi_irq(unsigned int irq)
+static void shutdown_mappi(struct irq_data *data)
 {
 	unsigned long port;
 
-	port = irq2port(irq);
+	port = irq2port(data->irq);
 	outl(M32R_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip mappi_irq_type =
 {
-	.name = "M32700-IRQ",
-	.startup = startup_mappi_irq,
-	.shutdown = shutdown_mappi_irq,
-	.enable = enable_mappi_irq,
-	.disable = disable_mappi_irq,
-	.ack = mask_and_ack_mappi,
-	.end = end_mappi_irq
+	.name		= "M32700-IRQ",
+	.irq_shutdown	= shutdown_mappi,
+	.irq_mask	= mask_mappi,
+	.irq_unmask	= unmask_mappi,
 };
 
 /*
@@ -107,42 +98,33 @@
 	outw(data, port);
 }
 
-static void mask_and_ack_m32700ut_pld(unsigned int irq)
+static void mask_m32700ut_pld(struct irq_data *data)
 {
-	disable_m32700ut_pld_irq(irq);
+	disable_m32700ut_pld_irq(data->irq);
 }
 
-static void end_m32700ut_pld_irq(unsigned int irq)
+static void unmask_m32700ut_pld(struct irq_data *data)
 {
-	enable_m32700ut_pld_irq(irq);
-	end_mappi_irq(M32R_IRQ_INT1);
+	enable_m32700ut_pld_irq(data->irq);
+	enable_mappi_irq(M32R_IRQ_INT1);
 }
 
-static unsigned int startup_m32700ut_pld_irq(unsigned int irq)
-{
-	enable_m32700ut_pld_irq(irq);
-	return 0;
-}
-
-static void shutdown_m32700ut_pld_irq(unsigned int irq)
+static void shutdown_m32700ut_pld(struct irq_data *data)
 {
 	unsigned long port;
 	unsigned int pldirq;
 
-	pldirq = irq2pldirq(irq);
+	pldirq = irq2pldirq(data->irq);
 	port = pldirq2port(pldirq);
 	outw(PLD_ICUCR_ILEVEL7, port);
 }
 
 static struct irq_chip m32700ut_pld_irq_type =
 {
-	.name = "USRV-PLD-IRQ",
-	.startup = startup_m32700ut_pld_irq,
-	.shutdown = shutdown_m32700ut_pld_irq,
-	.enable = enable_m32700ut_pld_irq,
-	.disable = disable_m32700ut_pld_irq,
-	.ack = mask_and_ack_m32700ut_pld,
-	.end = end_m32700ut_pld_irq
+	.name		= "USRV-PLD-IRQ",
+	.irq_shutdown	= shutdown_m32700ut_pld,
+	.irq_mask	= mask_m32700ut_pld,
+	.irq_unmask	= unmask_m32700ut_pld,
 };
 
 void __init init_IRQ(void)
@@ -156,53 +138,42 @@
 		once++;
 
 	/* MFT2 : system timer */
-	irq_desc[M32R_IRQ_MFT2].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_MFT2].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_MFT2].action = 0;
-	irq_desc[M32R_IRQ_MFT2].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_MFT2, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_MFT2].icucr = M32R_ICUCR_IEN;
 	disable_mappi_irq(M32R_IRQ_MFT2);
 
 #if defined(CONFIG_SERIAL_M32R_SIO)
 	/* SIO0_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO0_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_R].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_SIO0_R].action = 0;
-	irq_desc[M32R_IRQ_SIO0_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_R, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_R].icucr = 0;
 	disable_mappi_irq(M32R_IRQ_SIO0_R);
 
 	/* SIO0_S : uart send data */
-	irq_desc[M32R_IRQ_SIO0_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO0_S].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_SIO0_S].action = 0;
-	irq_desc[M32R_IRQ_SIO0_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO0_S, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO0_S].icucr = 0;
 	disable_mappi_irq(M32R_IRQ_SIO0_S);
 
 	/* SIO1_R : uart receive data */
-	irq_desc[M32R_IRQ_SIO1_R].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_R].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_SIO1_R].action = 0;
-	irq_desc[M32R_IRQ_SIO1_R].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_R, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_R].icucr = 0;
 	disable_mappi_irq(M32R_IRQ_SIO1_R);
 
 	/* SIO1_S : uart send data */
-	irq_desc[M32R_IRQ_SIO1_S].status = IRQ_DISABLED;
-	irq_desc[M32R_IRQ_SIO1_S].chip = &mappi_irq_type;
-	irq_desc[M32R_IRQ_SIO1_S].action = 0;
-	irq_desc[M32R_IRQ_SIO1_S].depth = 1;
+	set_irq_chip_and_handler(M32R_IRQ_SIO1_S, &mappi_irq_type,
+				 handle_level_irq);
 	icu_data[M32R_IRQ_SIO1_S].icucr = 0;
 	disable_mappi_irq(M32R_IRQ_SIO1_S);
 #endif  /* CONFIG_SERIAL_M32R_SIO */
 
 	/* INT#67-#71: CFC#0 IREQ on PLD */
 	for (i = 0 ; i < CONFIG_M32R_CFC_NUM ; i++ ) {
-		irq_desc[PLD_IRQ_CF0 + i].status = IRQ_DISABLED;
-		irq_desc[PLD_IRQ_CF0 + i].chip = &m32700ut_pld_irq_type;
-		irq_desc[PLD_IRQ_CF0 + i].action = 0;
-		irq_desc[PLD_IRQ_CF0 + i].depth = 1;	/* disable nested irq */
+		set_irq_chip_and_handler(PLD_IRQ_CF0 + i,
+					 &m32700ut_pld_irq_type,
+					 handle_level_irq);
 		pld_icu_data[irq2pldirq(PLD_IRQ_CF0 + i)].icucr
 			= PLD_ICUCR_ISMOD01;	/* 'L' level sense */
 		disable_m32700ut_pld_irq(PLD_IRQ_CF0 + i);
@@ -210,19 +181,15 @@
 
 #if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
 	/* INT#76: 16552D#0 IREQ on PLD */
-	irq_desc[PLD_IRQ_UART0].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_UART0].chip = &m32700ut_pld_irq_type;
-	irq_desc[PLD_IRQ_UART0].action = 0;
-	irq_desc[PLD_IRQ_UART0].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_UART0, &m32700ut_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_UART0)].icucr
 		= PLD_ICUCR_ISMOD03;	/* 'H' level sense */
 	disable_m32700ut_pld_irq(PLD_IRQ_UART0);
 
 	/* INT#77: 16552D#1 IREQ on PLD */
-	irq_desc[PLD_IRQ_UART1].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_UART1].chip = &m32700ut_pld_irq_type;
-	irq_desc[PLD_IRQ_UART1].action = 0;
-	irq_desc[PLD_IRQ_UART1].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_UART1, &m32700ut_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_UART1)].icucr
 		= PLD_ICUCR_ISMOD03;	/* 'H' level sense */
 	disable_m32700ut_pld_irq(PLD_IRQ_UART1);
@@ -230,10 +197,8 @@
 
 #if defined(CONFIG_IDC_AK4524) || defined(CONFIG_IDC_AK4524_MODULE)
 	/* INT#80: AK4524 IREQ on PLD */
-	irq_desc[PLD_IRQ_SNDINT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_SNDINT].chip = &m32700ut_pld_irq_type;
-	irq_desc[PLD_IRQ_SNDINT].action = 0;
-	irq_desc[PLD_IRQ_SNDINT].depth = 1;	/* disable nested irq */
+	set_irq_chip_and_handler(PLD_IRQ_SNDINT, &m32700ut_pld_irq_type,
+				 handle_level_irq);
 	pld_icu_data[irq2pldirq(PLD_IRQ_SNDINT)].icucr
 		= PLD_ICUCR_ISMOD01;	/* 'L' level sense */
 	disable_m32700ut_pld_irq(PLD_IRQ_SNDINT);
diff --git a/arch/m68k/amiga/config.c b/arch/m68k/amiga/config.c
index b1577f7..82a4bb5 100644
--- a/arch/m68k/amiga/config.c
+++ b/arch/m68k/amiga/config.c
@@ -610,17 +610,17 @@
 
 static int __init amiga_savekmsg_setup(char *arg)
 {
-	static struct resource debug_res = { .name = "Debug" };
-
 	if (!MACH_IS_AMIGA || strcmp(arg, "mem"))
-		goto done;
+		return 0;
 
-	if (!AMIGAHW_PRESENT(CHIP_RAM)) {
-		printk("Warning: no chipram present for debugging\n");
-		goto done;
+	if (amiga_chip_size < SAVEKMSG_MAXMEM) {
+		pr_err("Not enough chipram for debugging\n");
+		return -ENOMEM;
 	}
 
-	savekmsg = amiga_chip_alloc_res(SAVEKMSG_MAXMEM, &debug_res);
+	/* Just steal the block, the chipram allocator isn't functional yet */
+	amiga_chip_size -= SAVEKMSG_MAXMEM;
+	savekmsg = (void *)ZTWO_VADDR(CHIP_PHYSADDR + amiga_chip_size);
 	savekmsg->magic1 = SAVEKMSG_MAGIC1;
 	savekmsg->magic2 = SAVEKMSG_MAGIC2;
 	savekmsg->magicptr = ZTWO_PADDR(savekmsg);
@@ -628,8 +628,6 @@
 
 	amiga_console_driver.write = amiga_mem_console_write;
 	register_console(&amiga_console_driver);
-
-done:
 	return 0;
 }
 
diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
index 39478dd..26a804e 100644
--- a/arch/m68k/atari/ataints.c
+++ b/arch/m68k/atari/ataints.c
@@ -388,9 +388,9 @@
 	}
 
 	if (ATARIHW_PRESENT(SCC) && !atari_SCC_reset_done) {
-		scc.cha_a_ctrl = 9;
+		atari_scc.cha_a_ctrl = 9;
 		MFPDELAY();
-		scc.cha_a_ctrl = (char) 0xc0; /* hardware reset */
+		atari_scc.cha_a_ctrl = (char) 0xc0; /* hardware reset */
 	}
 
 	if (ATARIHW_PRESENT(SCU)) {
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index ae2d96e..4203d10 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -315,7 +315,7 @@
 		ATARIHW_SET(SCC_DMA);
 		printk("SCC_DMA ");
 	}
-	if (scc_test(&scc.cha_a_ctrl)) {
+	if (scc_test(&atari_scc.cha_a_ctrl)) {
 		ATARIHW_SET(SCC);
 		printk("SCC ");
 	}
diff --git a/arch/m68k/atari/debug.c b/arch/m68k/atari/debug.c
index 28efdc3..5a48424 100644
--- a/arch/m68k/atari/debug.c
+++ b/arch/m68k/atari/debug.c
@@ -53,9 +53,9 @@
 {
 	do {
 		MFPDELAY();
-	} while (!(scc.cha_b_ctrl & 0x04)); /* wait for tx buf empty */
+	} while (!(atari_scc.cha_b_ctrl & 0x04)); /* wait for tx buf empty */
 	MFPDELAY();
-	scc.cha_b_data = c;
+	atari_scc.cha_b_data = c;
 }
 
 static void atari_scc_console_write(struct console *co, const char *str,
@@ -140,9 +140,9 @@
 {
 	do {
 		MFPDELAY();
-	} while (!(scc.cha_b_ctrl & 0x01)); /* wait for rx buf filled */
+	} while (!(atari_scc.cha_b_ctrl & 0x01)); /* wait for rx buf filled */
 	MFPDELAY();
-	return scc.cha_b_data;
+	return atari_scc.cha_b_data;
 }
 
 int atari_midi_console_wait_key(struct console *co)
@@ -185,9 +185,9 @@
 
 #define SCC_WRITE(reg, val)				\
 	do {						\
-		scc.cha_b_ctrl = (reg);			\
+		atari_scc.cha_b_ctrl = (reg);		\
 		MFPDELAY();				\
-		scc.cha_b_ctrl = (val);			\
+		atari_scc.cha_b_ctrl = (val);		\
 		MFPDELAY();				\
 	} while (0)
 
@@ -240,7 +240,7 @@
 	reg3 = (cflag & CSIZE) == CS8 ? 0xc0 : 0x40;
 	reg5 = (cflag & CSIZE) == CS8 ? 0x60 : 0x20 | 0x82 /* assert DTR/RTS */;
 
-	(void)scc.cha_b_ctrl;		/* reset reg pointer */
+	(void)atari_scc.cha_b_ctrl;	/* reset reg pointer */
 	SCC_WRITE(9, 0xc0);		/* reset */
 	LONG_DELAY();			/* extra delay after WR9 access */
 	SCC_WRITE(4, (cflag & PARENB) ? ((cflag & PARODD) ? 0x01 : 0x03)
diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
index a714e1a..f51f709 100644
--- a/arch/m68k/include/asm/atarihw.h
+++ b/arch/m68k/include/asm/atarihw.h
@@ -449,7 +449,7 @@
   u_char char_dummy3;
   u_char cha_b_data;
  };
-# define scc ((*(volatile struct SCC*)SCC_BAS))
+# define atari_scc ((*(volatile struct SCC*)SCC_BAS))
 
 /* The ESCC (Z85230) in an Atari ST. The channels are reversed! */
 # define st_escc ((*(volatile struct SCC*)0xfffffa31))
diff --git a/arch/m68k/include/asm/string.h b/arch/m68k/include/asm/string.h
index 2936dda..3219845 100644
--- a/arch/m68k/include/asm/string.h
+++ b/arch/m68k/include/asm/string.h
@@ -81,18 +81,6 @@
 	strcpy(__d + strlen(__d), (s));		\
 })
 
-#define __HAVE_ARCH_STRCHR
-static inline char *strchr(const char *s, int c)
-{
-	char sc, ch = c;
-
-	for (; (sc = *s++) != ch; ) {
-		if (!sc)
-			return NULL;
-	}
-	return (char *)s - 1;
-}
-
 #ifndef CONFIG_COLDFIRE
 #define __HAVE_ARCH_STRCMP
 static inline int strcmp(const char *cs, const char *ct)
@@ -111,14 +99,12 @@
 		: "+a" (cs), "+a" (ct), "=d" (res));
 	return res;
 }
+#endif /* CONFIG_COLDFIRE */
 
 #define __HAVE_ARCH_MEMMOVE
 extern void *memmove(void *, const void *, __kernel_size_t);
 
-#define __HAVE_ARCH_MEMCMP
-extern int memcmp(const void *, const void *, __kernel_size_t);
 #define memcmp(d, s, n) __builtin_memcmp(d, s, n)
-#endif /* CONFIG_COLDFIRE */
 
 #define __HAVE_ARCH_MEMSET
 extern void *memset(void *, int, __kernel_size_t);
diff --git a/arch/m68k/lib/string.c b/arch/m68k/lib/string.c
index 4253f87..d399c5f 100644
--- a/arch/m68k/lib/string.c
+++ b/arch/m68k/lib/string.c
@@ -243,14 +243,3 @@
 	return xdest;
 }
 EXPORT_SYMBOL(memmove);
-
-int memcmp(const void *cs, const void *ct, size_t count)
-{
-	const unsigned char *su1, *su2;
-
-	for (su1 = cs, su2 = ct; count > 0; ++su1, ++su2, count--)
-		if (*su1 != *su2)
-			return *su1 < *su2 ? -1 : +1;
-	return 0;
-}
-EXPORT_SYMBOL(memcmp);
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 704e7b9..8b9daca 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -2,6 +2,7 @@
 	bool
 	default y
 	select HAVE_IDE
+	select HAVE_GENERIC_HARDIRQS
 
 config MMU
 	bool
@@ -48,14 +49,6 @@
 	bool
 	default y
 
-config GENERIC_HARDIRQS
-	bool
-	default y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	bool
-	default y
-
 config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
diff --git a/arch/m68knommu/configs/m5208evb_defconfig b/arch/m68knommu/configs/m5208evb_defconfig
index 6ac2981..2f5655c 100644
--- a/arch/m68knommu/configs/m5208evb_defconfig
+++ b/arch/m68knommu/configs/m5208evb_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/configs/m5249evb_defconfig b/arch/m68knommu/configs/m5249evb_defconfig
index 14934ff..16df72b 100644
--- a/arch/m68knommu/configs/m5249evb_defconfig
+++ b/arch/m68knommu/configs/m5249evb_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/configs/m5272c3_defconfig b/arch/m68knommu/configs/m5272c3_defconfig
index 5985a3b..4e6ea50 100644
--- a/arch/m68knommu/configs/m5272c3_defconfig
+++ b/arch/m68knommu/configs/m5272c3_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/configs/m5275evb_defconfig b/arch/m68knommu/configs/m5275evb_defconfig
index 5a7857e..f3dd741 100644
--- a/arch/m68knommu/configs/m5275evb_defconfig
+++ b/arch/m68knommu/configs/m5275evb_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/configs/m5307c3_defconfig b/arch/m68knommu/configs/m5307c3_defconfig
index e810201..bce0a20 100644
--- a/arch/m68knommu/configs/m5307c3_defconfig
+++ b/arch/m68knommu/configs/m5307c3_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/configs/m5407c3_defconfig b/arch/m68knommu/configs/m5407c3_defconfig
index 5c124a7..618cc32 100644
--- a/arch/m68knommu/configs/m5407c3_defconfig
+++ b/arch/m68knommu/configs/m5407c3_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/defconfig b/arch/m68knommu/defconfig
index 6ac2981..2f5655c 100644
--- a/arch/m68knommu/defconfig
+++ b/arch/m68knommu/defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_FUTEX is not set
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index ef33213..47e15eb 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -141,6 +141,12 @@
 		*(__param)
 		__stop___param = .;
 
+		/* Built-in module versions */
+		. = ALIGN(4) ;
+		__start___modver = .;
+		*(__modver)
+		__stop___modver = .;
+
 		. = ALIGN(4) ;
 		_etext = . ;
 	} > TEXT
diff --git a/arch/m68knommu/lib/Makefile b/arch/m68knommu/lib/Makefile
index d94d709..32d852e 100644
--- a/arch/m68knommu/lib/Makefile
+++ b/arch/m68knommu/lib/Makefile
@@ -4,4 +4,4 @@
 
 lib-y	:= ashldi3.o ashrdi3.o lshrdi3.o \
 	   muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \
-	   checksum.o memcpy.o memset.o delay.o
+	   checksum.o memcpy.o memmove.o memset.o delay.o
diff --git a/arch/m68knommu/lib/memmove.c b/arch/m68knommu/lib/memmove.c
new file mode 100644
index 0000000..b3dcfe9
--- /dev/null
+++ b/arch/m68knommu/lib/memmove.c
@@ -0,0 +1,105 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#define __IN_STRING_C
+
+#include <linux/module.h>
+#include <linux/string.h>
+
+void *memmove(void *dest, const void *src, size_t n)
+{
+	void *xdest = dest;
+	size_t temp;
+
+	if (!n)
+		return xdest;
+
+	if (dest < src) {
+		if ((long)dest & 1) {
+			char *cdest = dest;
+			const char *csrc = src;
+			*cdest++ = *csrc++;
+			dest = cdest;
+			src = csrc;
+			n--;
+		}
+		if (n > 2 && (long)dest & 2) {
+			short *sdest = dest;
+			const short *ssrc = src;
+			*sdest++ = *ssrc++;
+			dest = sdest;
+			src = ssrc;
+			n -= 2;
+		}
+		temp = n >> 2;
+		if (temp) {
+			long *ldest = dest;
+			const long *lsrc = src;
+			temp--;
+			do
+				*ldest++ = *lsrc++;
+			while (temp--);
+			dest = ldest;
+			src = lsrc;
+		}
+		if (n & 2) {
+			short *sdest = dest;
+			const short *ssrc = src;
+			*sdest++ = *ssrc++;
+			dest = sdest;
+			src = ssrc;
+		}
+		if (n & 1) {
+			char *cdest = dest;
+			const char *csrc = src;
+			*cdest = *csrc;
+		}
+	} else {
+		dest = (char *)dest + n;
+		src = (const char *)src + n;
+		if ((long)dest & 1) {
+			char *cdest = dest;
+			const char *csrc = src;
+			*--cdest = *--csrc;
+			dest = cdest;
+			src = csrc;
+			n--;
+		}
+		if (n > 2 && (long)dest & 2) {
+			short *sdest = dest;
+			const short *ssrc = src;
+			*--sdest = *--ssrc;
+			dest = sdest;
+			src = ssrc;
+			n -= 2;
+		}
+		temp = n >> 2;
+		if (temp) {
+			long *ldest = dest;
+			const long *lsrc = src;
+			temp--;
+			do
+				*--ldest = *--lsrc;
+			while (temp--);
+			dest = ldest;
+			src = lsrc;
+		}
+		if (n & 2) {
+			short *sdest = dest;
+			const short *ssrc = src;
+			*--sdest = *--ssrc;
+			dest = sdest;
+			src = ssrc;
+		}
+		if (n & 1) {
+			char *cdest = dest;
+			const char *csrc = src;
+			*--cdest = *--csrc;
+		}
+	}
+	return xdest;
+}
+EXPORT_SYMBOL(memmove);
diff --git a/arch/m68knommu/platform/5249/intc2.c b/arch/m68knommu/platform/5249/intc2.c
index d09d9da..c5151f8 100644
--- a/arch/m68knommu/platform/5249/intc2.c
+++ b/arch/m68knommu/platform/5249/intc2.c
@@ -50,8 +50,10 @@
 	int irq;
 
 	/* GPIO interrupt sources */
-	for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++)
+	for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) {
 		irq_desc[irq].chip = &intc2_irq_gpio_chip;
+		set_irq_handler(irq, handle_edge_irq);
+	}
 
 	return 0;
 }
diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S
index 240a7a6..676960c 100644
--- a/arch/m68knommu/platform/68328/entry.S
+++ b/arch/m68knommu/platform/68328/entry.S
@@ -108,7 +108,6 @@
 	movel	%d1,%a2
 1:
 	move	%a2@(TI_FLAGS),%d1	/* thread_info->flags */
-	andl	#_TIF_WORK_MASK,%d1
 	jne	Lwork_to_do
 	RESTORE_ALL
 
diff --git a/arch/m68knommu/platform/68360/commproc.c b/arch/m68knommu/platform/68360/commproc.c
index f27e688..8e4e10c 100644
--- a/arch/m68knommu/platform/68360/commproc.c
+++ b/arch/m68knommu/platform/68360/commproc.c
@@ -210,7 +210,7 @@
 cpm_install_handler(int vec, void (*handler)(), void *dev_id)
 {
 
-	request_irq(vec, handler, IRQ_FLG_LOCK, "timer", dev_id);
+	request_irq(vec, handler, 0, "timer", dev_id);
 
 /* 	if (cpm_vecs[vec].handler != 0) */
 /* 		printk(KERN_INFO "CPM interrupt %x replacing %x\n", */
diff --git a/arch/m68knommu/platform/68360/config.c b/arch/m68knommu/platform/68360/config.c
index ac629fa3..9dd5bca 100644
--- a/arch/m68knommu/platform/68360/config.c
+++ b/arch/m68knommu/platform/68360/config.c
@@ -75,7 +75,7 @@
   /* Set compare register  32Khz / 32 / 10 = 100 */
   TCMP = 10;                                                              
 
-  request_irq(IRQ_MACHSPEC | 1, timer_routine, IRQ_FLG_LOCK, "timer", NULL);
+  request_irq(IRQ_MACHSPEC | 1, timer_routine, 0, "timer", NULL);
 #endif
 
   /* General purpose quicc timers: MC68360UM p7-20 */
diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S
index 8a28788..46c1b18 100644
--- a/arch/m68knommu/platform/68360/entry.S
+++ b/arch/m68knommu/platform/68360/entry.S
@@ -104,7 +104,6 @@
 	movel	%d1,%a2
 1:
 	move	%a2@(TI_FLAGS),%d1	/* thread_info->flags */
-	andl	#_TIF_WORK_MASK,%d1
 	jne	Lwork_to_do
 	RESTORE_ALL
 
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c
index ad96ab1..a29041c 100644
--- a/arch/m68knommu/platform/68360/ints.c
+++ b/arch/m68knommu/platform/68360/ints.c
@@ -132,8 +132,8 @@
 	pquicc->intr_cimr = 0x00000000;
 
 	for (i = 0; (i < NR_IRQS); i++) {
-		set_irq_chip(irq, &intc_irq_chip);
-		set_irq_handler(irq, handle_level_irq);
+		set_irq_chip(i, &intc_irq_chip);
+		set_irq_handler(i, handle_level_irq);
 	}
 }
 
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S
index 4ddfc3d..5837cf0 100644
--- a/arch/m68knommu/platform/coldfire/entry.S
+++ b/arch/m68knommu/platform/coldfire/entry.S
@@ -138,7 +138,6 @@
 	andl	#-THREAD_SIZE,%d1	/* at base of kernel stack */
 	movel	%d1,%a0
 	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
-	andl	#0xefff,%d1
 	jne	Lwork_to_do		/* still work to do */
 
 Lreturn:
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 5f5018a..3168003 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -15,6 +15,8 @@
 	select TRACING_SUPPORT
 	select OF
 	select OF_EARLY_FLATTREE
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_IRQ_PROBE
 
 config SWAP
 	def_bool n
@@ -37,12 +39,6 @@
 config GENERIC_HWEIGHT
 	def_bool y
 
-config GENERIC_HARDIRQS
-	def_bool y
-
-config GENERIC_IRQ_PROBE
-	def_bool y
-
 config GENERIC_CALIBRATE_DELAY
 	def_bool y
 
@@ -52,9 +48,6 @@
 config GENERIC_CLOCKEVENTS
 	def_bool y
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 config GENERIC_GPIO
 	def_bool y
 
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index ab8fbe7..b3f5eec 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -7,7 +7,7 @@
 CONFIG_INITRAMFS_SOURCE="rootfs.cpio"
 CONFIG_INITRAMFS_COMPRESSION_GZIP=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 # CONFIG_HOTPLUG is not set
diff --git a/arch/microblaze/configs/nommu_defconfig b/arch/microblaze/configs/nommu_defconfig
index ebc143c..0249e4b 100644
--- a/arch/microblaze/configs/nommu_defconfig
+++ b/arch/microblaze/configs/nommu_defconfig
@@ -6,7 +6,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 # CONFIG_HOTPLUG is not set
diff --git a/arch/microblaze/include/asm/irqflags.h b/arch/microblaze/include/asm/irqflags.h
index 5fd3190..c4532f0 100644
--- a/arch/microblaze/include/asm/irqflags.h
+++ b/arch/microblaze/include/asm/irqflags.h
@@ -12,7 +12,7 @@
 #include <linux/types.h>
 #include <asm/registers.h>
 
-#ifdef CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
 
 static inline unsigned long arch_local_irq_save(void)
 {
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index b23f680..885574a 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -411,20 +411,19 @@
 static inline unsigned long pte_update(pte_t *p, unsigned long clr,
 				unsigned long set)
 {
-	unsigned long old, tmp, msr;
+	unsigned long flags, old, tmp;
 
-	__asm__ __volatile__("\
-	msrclr	%2, 0x2\n\
-	nop\n\
-	lw	%0, %4, r0\n\
-	andn	%1, %0, %5\n\
-	or	%1, %1, %6\n\
-	sw	%1, %4, r0\n\
-	mts     rmsr, %2\n\
-	nop"
-	: "=&r" (old), "=&r" (tmp), "=&r" (msr), "=m" (*p)
-	: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set), "m" (*p)
-	: "cc");
+	raw_local_irq_save(flags);
+
+	__asm__ __volatile__(	"lw	%0, %2, r0	\n"
+				"andn	%1, %0, %3	\n"
+				"or	%1, %1, %4	\n"
+				"sw	%1, %2, r0	\n"
+			: "=&r" (old), "=&r" (tmp)
+			: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
+			: "cc");
+
+	raw_local_irq_restore(flags);
 
 	return old;
 }
diff --git a/arch/microblaze/kernel/cpu/pvr.c b/arch/microblaze/kernel/cpu/pvr.c
index e01afa6..488c1ed 100644
--- a/arch/microblaze/kernel/cpu/pvr.c
+++ b/arch/microblaze/kernel/cpu/pvr.c
@@ -27,7 +27,7 @@
 	register unsigned tmp __asm__("r3");			\
 	tmp = 0x0;	/* Prevent warning about unused */	\
 	__asm__ __volatile__ (					\
-			"mfs	%0, rpvr" #pvrid ";"	\
+			"mfs	%0, rpvr" #pvrid ";"		\
 			: "=r" (tmp) : : "memory"); 		\
 	val = tmp;						\
 }
@@ -54,7 +54,7 @@
 	if (!(flags & PVR_MSR_BIT))
 		return 0;
 
-	get_single_pvr(0x00, pvr0);
+	get_single_pvr(0, pvr0);
 	pr_debug("%s: pvr0 is 0x%08x\n", __func__, pvr0);
 
 	if (pvr0 & PVR0_PVR_FULL_MASK)
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 4243400..778a5ce 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -62,23 +62,32 @@
 	andi	r1, r1, ~2
 	mts	rmsr, r1
 /*
- * Here is checking mechanism which check if Microblaze has msr instructions
- * We load msr and compare it with previous r1 value - if is the same,
- * msr instructions works if not - cpu don't have them.
+ * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
+ * if the msrclr instruction is not enabled. We use this to detect
+ * if the opcode is available, by issuing msrclr and then testing the result.
+ * r8 == 0 - msr instructions are implemented
+ * r8 != 0 - msr instructions are not implemented
  */
-	/* r8=0 - I have msr instr, 1 - I don't have them */
-	rsubi	r0, r0, 1	/* set the carry bit */
-	msrclr	r0, 0x4		/* try to clear it */
-	/* read the carry bit, r8 will be '0' if msrclr exists */
-	addik	r8, r0, 0
+	msrclr	r8, 0 /* clear nothing - just read msr for test */
+	cmpu	r8, r8, r1 /* r1 must contain msr reg content */
 
 /* r7 may point to an FDT, or there may be one linked in.
    if it's in r7, we've got to save it away ASAP.
    We ensure r7 points to a valid FDT, just in case the bootloader
    is broken or non-existent */
 	beqi	r7, no_fdt_arg			/* NULL pointer?  don't copy */
-	lw	r11, r0, r7			/* Does r7 point to a */
-	rsubi	r11, r11, OF_DT_HEADER		/* valid FDT? */
+/* Does r7 point to a valid FDT? Load HEADER magic number */
+	/* Run time Big/Little endian platform */
+	/* Save 1 as word and load byte - 0 - BIG, 1 - LITTLE */
+	addik	r11, r0, 0x1 /* BIG/LITTLE checking value */
+	/* __bss_start will be zeroed later - it is just a temporary location */
+	swi	r11, r0, TOPHYS(__bss_start)
+	lbui	r11, r0, TOPHYS(__bss_start)
+	beqid	r11, big_endian /* DO NOT break delay stop dependency */
+	lw	r11, r0, r7 /* Big endian load in delay slot */
+	lwr	r11, r0, r7 /* Little endian load */
+big_endian:
+	rsubi	r11, r11, OF_DT_HEADER	/* Check FDT header */
 	beqi	r11, _prepare_copy_fdt
 	or	r7, r0, r0		/* clear R7 when not valid DTB */
 	bnei	r11, no_fdt_arg			/* No - get out of here */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 25f6e07..782680d 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -147,10 +147,6 @@
 	#if CONFIG_XILINX_MICROBLAZE0_USE_BARREL > 0
 	#define BSRLI(rD, rA, imm)	\
 		bsrli rD, rA, imm
-	#elif CONFIG_XILINX_MICROBLAZE0_USE_DIV > 0
-	#define BSRLI(rD, rA, imm)	\
-		ori rD, r0, (1 << imm);	\
-		idivu rD, rD, rA
 	#else
 	#define BSRLI(rD, rA, imm) BSRLI ## imm (rD, rA)
 	/* Only the used shift constants defined here - add more if needed */
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index bb1558e..9312fbb 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -161,11 +161,11 @@
 #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
 	if (msr)
 		eprintk("!!!Your kernel has setup MSR instruction but "
-				"CPU don't have it %d\n", msr);
+				"CPU don't have it %x\n", msr);
 #else
 	if (!msr)
 		eprintk("!!!Your kernel not setup MSR instruction but "
-				"CPU have it %d\n", msr);
+				"CPU have it %x\n", msr);
 #endif
 
 	for (src = __ivt_start; src < __ivt_end; src++, dst++)
diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S
index fdc48bb..62021d7 100644
--- a/arch/microblaze/lib/fastcopy.S
+++ b/arch/microblaze/lib/fastcopy.S
@@ -29,6 +29,10 @@
  *	between mem locations with size of xfer spec'd in bytes
  */
 
+#ifdef __MICROBLAZEEL__
+#error Microblaze LE does not support the ASM optimized lib functions. Disable OPT_LIB_ASM.
+#endif
+
 #include <linux/linkage.h>
 	.text
 	.globl	memcpy
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 548e6cc..f5ecc05 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -793,9 +793,6 @@
 	bool
 	default y
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 #
 # Select some configuration options automatically based on user selections.
 #
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index f437cd1..5358f90 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -7,7 +7,7 @@
 source "lib/Kconfig.debug"
 
 config EARLY_PRINTK
-	bool "Early printk" if EMBEDDED
+	bool "Early printk" if EXPERT
 	depends on SYS_HAS_EARLY_PRINTK
 	default y
 	help
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
index c78c7e7..6cd5a51 100644
--- a/arch/mips/configs/ar7_defconfig
+++ b/arch/mips/configs/ar7_defconfig
@@ -14,7 +14,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index 927d58b..22fdf2f 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -21,7 +21,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_RD_LZMA=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/bcm63xx_defconfig b/arch/mips/configs/bcm63xx_defconfig
index b806a4e..9190051 100644
--- a/arch/mips/configs/bcm63xx_defconfig
+++ b/arch/mips/configs/bcm63xx_defconfig
@@ -10,7 +10,7 @@
 # CONFIG_SWAP is not set
 CONFIG_TINY_RCU=y
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_PCSPKR_PLATFORM is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index 9749bc8..1cdff6b 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -26,7 +26,7 @@
 CONFIG_NET_NS=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_SLAB=y
diff --git a/arch/mips/configs/capcella_defconfig b/arch/mips/configs/capcella_defconfig
index 502a8e9..5135dc0 100644
--- a/arch/mips/configs/capcella_defconfig
+++ b/arch/mips/configs/capcella_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/cavium-octeon_defconfig b/arch/mips/configs/cavium-octeon_defconfig
index 3567b6f..75165df 100644
--- a/arch/mips/configs/cavium-octeon_defconfig
+++ b/arch/mips/configs/cavium-octeon_defconfig
@@ -15,7 +15,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/cobalt_defconfig b/arch/mips/configs/cobalt_defconfig
index 6c4f7e9..5419adb 100644
--- a/arch/mips/configs/cobalt_defconfig
+++ b/arch/mips/configs/cobalt_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/mips/configs/db1000_defconfig b/arch/mips/configs/db1000_defconfig
index dda158b..4044c9e 100644
--- a/arch/mips/configs/db1000_defconfig
+++ b/arch/mips/configs/db1000_defconfig
@@ -11,7 +11,7 @@
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/mips/configs/db1100_defconfig b/arch/mips/configs/db1100_defconfig
index 7e4fc76..c6b4993 100644
--- a/arch/mips/configs/db1100_defconfig
+++ b/arch/mips/configs/db1100_defconfig
@@ -11,7 +11,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/db1200_defconfig b/arch/mips/configs/db1200_defconfig
index 6fe205f..1f69249 100644
--- a/arch/mips/configs/db1200_defconfig
+++ b/arch/mips/configs/db1200_defconfig
@@ -12,7 +12,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/db1500_defconfig b/arch/mips/configs/db1500_defconfig
index a741c554..b6e21c7 100644
--- a/arch/mips/configs/db1500_defconfig
+++ b/arch/mips/configs/db1500_defconfig
@@ -10,7 +10,7 @@
 CONFIG_KERNEL_LZMA=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/mips/configs/db1550_defconfig b/arch/mips/configs/db1550_defconfig
index cd32dd8..798a553 100644
--- a/arch/mips/configs/db1550_defconfig
+++ b/arch/mips/configs/db1550_defconfig
@@ -11,7 +11,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index b15bfd1..87d0340 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
diff --git a/arch/mips/configs/e55_defconfig b/arch/mips/configs/e55_defconfig
index 0b60c06..0126e66 100644
--- a/arch/mips/configs/e55_defconfig
+++ b/arch/mips/configs/e55_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 63944a1..e5b73de 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -17,7 +17,7 @@
 CONFIG_USER_NS=y
 CONFIG_PID_NS=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_PCSPKR_PLATFORM is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
index 53edc13..48a40ae 100644
--- a/arch/mips/configs/gpr_defconfig
+++ b/arch/mips/configs/gpr_defconfig
@@ -11,7 +11,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index 36de199..d160656 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -17,7 +17,7 @@
 CONFIG_USER_NS=y
 CONFIG_PID_NS=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 # CONFIG_PCSPKR_PLATFORM is not set
 # CONFIG_COMPAT_BRK is not set
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 4b16c48..0e36abc 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -15,7 +15,7 @@
 CONFIG_CPUSETS=y
 CONFIG_RELAY=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig
index 98f2c77..4dbf626 100644
--- a/arch/mips/configs/ip28_defconfig
+++ b/arch/mips/configs/ip28_defconfig
@@ -8,7 +8,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index 5bea99b..7bbd521 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -10,7 +10,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_RELAY=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
index 6ae46bc..92a60ae 100644
--- a/arch/mips/configs/jazz_defconfig
+++ b/arch/mips/configs/jazz_defconfig
@@ -10,7 +10,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/jmr3927_defconfig b/arch/mips/configs/jmr3927_defconfig
index bf24e93..db5705e 100644
--- a/arch/mips/configs/jmr3927_defconfig
+++ b/arch/mips/configs/jmr3927_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 # CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_SLAB=y
diff --git a/arch/mips/configs/lasat_defconfig b/arch/mips/configs/lasat_defconfig
index 6447261..d9f3db2 100644
--- a/arch/mips/configs/lasat_defconfig
+++ b/arch/mips/configs/lasat_defconfig
@@ -8,7 +8,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index f7033f3..167c1d0 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -21,7 +21,7 @@
 CONFIG_RD_BZIP2=y
 CONFIG_RD_LZMA=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 9d03b68..7270f31 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -15,7 +15,7 @@
 CONFIG_IPC_NS=y
 CONFIG_PID_NS=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
diff --git a/arch/mips/configs/markeins_defconfig b/arch/mips/configs/markeins_defconfig
index 86bf001..9c9a123 100644
--- a/arch/mips/configs/markeins_defconfig
+++ b/arch/mips/configs/markeins_defconfig
@@ -9,7 +9,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/mipssim_defconfig b/arch/mips/configs/mipssim_defconfig
index 4925f50..b5ad738 100644
--- a/arch/mips/configs/mipssim_defconfig
+++ b/arch/mips/configs/mipssim_defconfig
@@ -7,7 +7,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/mpc30x_defconfig b/arch/mips/configs/mpc30x_defconfig
index efb779f..c16de98 100644
--- a/arch/mips/configs/mpc30x_defconfig
+++ b/arch/mips/configs/mpc30x_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/msp71xx_defconfig b/arch/mips/configs/msp71xx_defconfig
index ab05145..d1142e9 100644
--- a/arch/mips/configs/msp71xx_defconfig
+++ b/arch/mips/configs/msp71xx_defconfig
@@ -8,7 +8,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SHMEM is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 8146997..a97a42c 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -11,7 +11,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=m
diff --git a/arch/mips/configs/pb1100_defconfig b/arch/mips/configs/pb1100_defconfig
index 1597aa1..75eb1b1 100644
--- a/arch/mips/configs/pb1100_defconfig
+++ b/arch/mips/configs/pb1100_defconfig
@@ -11,7 +11,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/pb1200_defconfig b/arch/mips/configs/pb1200_defconfig
index 96f0d43..dcbe270 100644
--- a/arch/mips/configs/pb1200_defconfig
+++ b/arch/mips/configs/pb1200_defconfig
@@ -12,7 +12,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/pb1500_defconfig b/arch/mips/configs/pb1500_defconfig
index b4bfd48..fa00487 100644
--- a/arch/mips/configs/pb1500_defconfig
+++ b/arch/mips/configs/pb1500_defconfig
@@ -11,7 +11,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/pb1550_defconfig b/arch/mips/configs/pb1550_defconfig
index 5a660024..e83d649 100644
--- a/arch/mips/configs/pb1550_defconfig
+++ b/arch/mips/configs/pb1550_defconfig
@@ -11,7 +11,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/pnx8335-stb225_defconfig b/arch/mips/configs/pnx8335-stb225_defconfig
index 39926a1..f292576 100644
--- a/arch/mips/configs/pnx8335-stb225_defconfig
+++ b/arch/mips/configs/pnx8335-stb225_defconfig
@@ -11,7 +11,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/pnx8550-jbs_defconfig b/arch/mips/configs/pnx8550-jbs_defconfig
index 3376bc8..1d1f206 100644
--- a/arch/mips/configs/pnx8550-jbs_defconfig
+++ b/arch/mips/configs/pnx8550-jbs_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/pnx8550-stb810_defconfig b/arch/mips/configs/pnx8550-stb810_defconfig
index 6514f1b..15c66a5 100644
--- a/arch/mips/configs/pnx8550-stb810_defconfig
+++ b/arch/mips/configs/pnx8550-stb810_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
diff --git a/arch/mips/configs/powertv_defconfig b/arch/mips/configs/powertv_defconfig
index f1f58e91..3b0b6e8 100644
--- a/arch/mips/configs/powertv_defconfig
+++ b/arch/mips/configs/powertv_defconfig
@@ -14,7 +14,7 @@
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_RD_GZIP is not set
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_PCSPKR_PLATFORM is not set
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index d6457bc..55902d9 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -13,7 +13,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index 29acfab..9cba856 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -12,7 +12,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 # CONFIG_PCSPKR_PLATFORM is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 2b3e476..2c0230e 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -12,7 +12,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/sb1250-swarm_defconfig b/arch/mips/configs/sb1250-swarm_defconfig
index 64840d7..5b0463e 100644
--- a/arch/mips/configs/sb1250-swarm_defconfig
+++ b/arch/mips/configs/sb1250-swarm_defconfig
@@ -15,7 +15,7 @@
 CONFIG_NAMESPACES=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/tb0219_defconfig b/arch/mips/configs/tb0219_defconfig
index d9be37f..30036b4 100644
--- a/arch/mips/configs/tb0219_defconfig
+++ b/arch/mips/configs/tb0219_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/tb0226_defconfig b/arch/mips/configs/tb0226_defconfig
index 3d25dd0..81bfa1d 100644
--- a/arch/mips/configs/tb0226_defconfig
+++ b/arch/mips/configs/tb0226_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/mips/configs/tb0287_defconfig b/arch/mips/configs/tb0287_defconfig
index be697c9..c415c4f 100644
--- a/arch/mips/configs/tb0287_defconfig
+++ b/arch/mips/configs/tb0287_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_PCSPKR_PLATFORM is not set
 CONFIG_SLAB=y
diff --git a/arch/mips/configs/workpad_defconfig b/arch/mips/configs/workpad_defconfig
index 7ec9287..ee4b2be 100644
--- a/arch/mips/configs/workpad_defconfig
+++ b/arch/mips/configs/workpad_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mips/configs/wrppmc_defconfig b/arch/mips/configs/wrppmc_defconfig
index a231b73..44a451b 100644
--- a/arch/mips/configs/wrppmc_defconfig
+++ b/arch/mips/configs/wrppmc_defconfig
@@ -7,7 +7,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 # CONFIG_EPOLL is not set
 CONFIG_SLAB=y
diff --git a/arch/mips/configs/yosemite_defconfig b/arch/mips/configs/yosemite_defconfig
index ab3a3dc..f72d305 100644
--- a/arch/mips/configs/yosemite_defconfig
+++ b/arch/mips/configs/yosemite_defconfig
@@ -8,7 +8,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_RELAY=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 8ed41cf..243bfa2 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -1,6 +1,7 @@
 config MN10300
 	def_bool y
 	select HAVE_OPROFILE
+	select GENERIC_HARDIRQS
 
 config AM33_2
 	def_bool n
@@ -34,9 +35,6 @@
 config RWSEM_XCHGADD_ALGORITHM
 	bool
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 config GENERIC_CALIBRATE_DELAY
 	def_bool y
 
@@ -79,10 +77,6 @@
 config ARCH_HAS_ILOG2_U32
 	def_bool y
 
-# Use the generic interrupt handling code in kernel/irq/
-config GENERIC_HARDIRQS
-	def_bool y
-
 config HOTPLUG_CPU
 	def_bool n
 
diff --git a/arch/mn10300/configs/asb2303_defconfig b/arch/mn10300/configs/asb2303_defconfig
index 3f749b6..1fd41ec 100644
--- a/arch/mn10300/configs/asb2303_defconfig
+++ b/arch/mn10300/configs/asb2303_defconfig
@@ -4,7 +4,7 @@
 CONFIG_TINY_RCU=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/mn10300/configs/asb2364_defconfig b/arch/mn10300/configs/asb2364_defconfig
index 83ce2f2..31d7626 100644
--- a/arch/mn10300/configs/asb2364_defconfig
+++ b/arch/mn10300/configs/asb2364_defconfig
@@ -15,7 +15,7 @@
 CONFIG_RESOURCE_COUNTERS=y
 CONFIG_RELAY=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
 CONFIG_SLAB=y
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 0888675..fed2946 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -12,7 +12,10 @@
 	select HAVE_IRQ_WORK
 	select HAVE_PERF_EVENTS
 	select GENERIC_ATOMIC64 if !64BIT
-	select GENERIC_HARDIRQS_NO__DO_IRQ
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_IRQ_PROBE
+	select IRQ_PER_CPU
+
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
 	  in many of their workstations & servers (HP9000 700 and 800 series,
@@ -66,22 +69,9 @@
 	depends on SMP
 	default y
 
-config GENERIC_HARDIRQS
-	def_bool y
-
-config GENERIC_IRQ_PROBE
-	def_bool y
-
 config HAVE_LATENCYTOP_SUPPORT
         def_bool y
 
-config IRQ_PER_CPU
-	bool
-	default y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 # unless you want to implement ACPI on PA-RISC ... ;-)
 config PM
 	bool
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig
index f9305f3..b647b18 100644
--- a/arch/parisc/configs/a500_defconfig
+++ b/arch/parisc/configs/a500_defconfig
@@ -8,7 +8,7 @@
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
index 628d3e0..311ca36 100644
--- a/arch/parisc/configs/c3000_defconfig
+++ b/arch/parisc/configs/c3000_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_SLAB=y
 CONFIG_PROFILING=y
diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c
index 11bdd68..fc770be 100644
--- a/arch/parisc/kernel/pdc_cons.c
+++ b/arch/parisc/kernel/pdc_cons.c
@@ -169,11 +169,11 @@
 
 	struct console *tmp;
 
-	acquire_console_sem();
+	console_lock();
 	for_each_console(tmp)
 		if (tmp == &pdc_cons)
 			break;
-	release_console_sem();
+	console_unlock();
 
 	if (!tmp) {
 		printk(KERN_INFO "PDC console driver not registered anymore, not creating %s\n", pdc_cons.name);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 959f38c..7d69e9b 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -36,24 +36,12 @@
 config GENERIC_CLOCKEVENTS
 	def_bool y
 
-config GENERIC_HARDIRQS
-	bool
-	default y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	bool
-	default y
-
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool PPC64
 
 config NEED_PER_CPU_EMBED_FIRST_CHUNK
 	def_bool PPC64
 
-config IRQ_PER_CPU
-	bool
-	default y
-
 config NR_IRQS
 	int "Number of virtual interrupt numbers"
 	range 32 32768
@@ -143,6 +131,9 @@
 	select HAVE_PERF_EVENTS
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
+	select HAVE_GENERIC_HARDIRQS
+	select HAVE_SPARSE_IRQ
+	select IRQ_PER_CPU
 
 config EARLY_PRINTK
 	bool
@@ -392,19 +383,6 @@
 	  CPU.  Generally saying Y is safe, although some problems have been
 	  reported with SMP Power Macintoshes with this option enabled.
 
-config SPARSE_IRQ
-	bool "Support sparse irq numbering"
-	default n
-	help
-	  This enables support for sparse irqs. This is useful for distro
-	  kernels that want to define a high CONFIG_NR_CPUS value but still
-	  want to have low kernel memory footprint on smaller machines.
-
-	  ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread
-	    out the irq_desc[] array in a more NUMA-friendly way. )
-
-	  If you don't know what to do here, say N.
-
 config NUMA
 	bool "NUMA support"
 	depends on PPC64
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 96deec6..8917816 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -368,7 +368,7 @@
 extra-installed		:= $(patsubst $(obj)/%, $(DESTDIR)$(WRAPPER_OBJDIR)/%, $(extra-y))
 hostprogs-installed	:= $(patsubst %, $(DESTDIR)$(WRAPPER_BINDIR)/%, $(hostprogs-y))
 wrapper-installed	:= $(DESTDIR)$(WRAPPER_BINDIR)/wrapper
-dts-installed		:= $(patsubst $(obj)/dts/%, $(DESTDIR)$(WRAPPER_DTSDIR)/%, $(wildcard $(obj)/dts/*.dts))
+dts-installed		:= $(patsubst $(dtstree)/%, $(DESTDIR)$(WRAPPER_DTSDIR)/%, $(wildcard $(dtstree)/*.dts))
 
 all-installed		:= $(extra-installed) $(hostprogs-installed) $(wrapper-installed) $(dts-installed)
 
diff --git a/arch/powerpc/boot/dts/mpc8308rdb.dts b/arch/powerpc/boot/dts/mpc8308rdb.dts
index d3db02f..a0bd188 100644
--- a/arch/powerpc/boot/dts/mpc8308rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8308rdb.dts
@@ -109,7 +109,7 @@
 		#address-cells = <1>;
 		#size-cells = <1>;
 		device_type = "soc";
-		compatible = "fsl,mpc8315-immr", "simple-bus";
+		compatible = "fsl,mpc8308-immr", "simple-bus";
 		ranges = <0 0xe0000000 0x00100000>;
 		reg = <0xe0000000 0x00000200>;
 		bus-frequency = <0>;
diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
index 2bbecbb..69422eb 100644
--- a/arch/powerpc/boot/dts/p1022ds.dts
+++ b/arch/powerpc/boot/dts/p1022ds.dts
@@ -291,13 +291,13 @@
 			ranges = <0x0 0xc100 0x200>;
 			cell-index = <1>;
 			dma00: dma-channel@0 {
-				compatible = "fsl,eloplus-dma-channel";
+				compatible = "fsl,ssi-dma-channel";
 				reg = <0x0 0x80>;
 				cell-index = <0>;
 				interrupts = <76 2>;
 			};
 			dma01: dma-channel@80 {
-				compatible = "fsl,eloplus-dma-channel";
+				compatible = "fsl,ssi-dma-channel";
 				reg = <0x80 0x80>;
 				cell-index = <1>;
 				interrupts = <77 2>;
diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig
index 97fedce..4182c77 100644
--- a/arch/powerpc/configs/40x/acadia_defconfig
+++ b/arch/powerpc/configs/40x/acadia_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/40x/ep405_defconfig b/arch/powerpc/configs/40x/ep405_defconfig
index 33b3c24..2dbb293 100644
--- a/arch/powerpc/configs/40x/ep405_defconfig
+++ b/arch/powerpc/configs/40x/ep405_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/40x/hcu4_defconfig b/arch/powerpc/configs/40x/hcu4_defconfig
index 4613079..ebeb4ac 100644
--- a/arch/powerpc/configs/40x/hcu4_defconfig
+++ b/arch/powerpc/configs/40x/hcu4_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index 34b8c1a..532ea9d 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/40x/makalu_defconfig b/arch/powerpc/configs/40x/makalu_defconfig
index 651be09..3c142ac 100644
--- a/arch/powerpc/configs/40x/makalu_defconfig
+++ b/arch/powerpc/configs/40x/makalu_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/40x/walnut_defconfig b/arch/powerpc/configs/40x/walnut_defconfig
index ded455e..ff57d48 100644
--- a/arch/powerpc/configs/40x/walnut_defconfig
+++ b/arch/powerpc/configs/40x/walnut_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/44x/arches_defconfig b/arch/powerpc/configs/44x/arches_defconfig
index 63746a041..3ed16d5 100644
--- a/arch/powerpc/configs/44x/arches_defconfig
+++ b/arch/powerpc/configs/44x/arches_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/bamboo_defconfig b/arch/powerpc/configs/44x/bamboo_defconfig
index f5f2a4e..b1b7d2c 100644
--- a/arch/powerpc/configs/44x/bamboo_defconfig
+++ b/arch/powerpc/configs/44x/bamboo_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/bluestone_defconfig b/arch/powerpc/configs/44x/bluestone_defconfig
index ac65b48..30a0a8e 100644
--- a/arch/powerpc/configs/44x/bluestone_defconfig
+++ b/arch/powerpc/configs/44x/bluestone_defconfig
@@ -4,7 +4,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_PCI_QUIRKS is not set
 # CONFIG_COMPAT_BRK is not set
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig
index 17e4dd9..a46942a 100644
--- a/arch/powerpc/configs/44x/canyonlands_defconfig
+++ b/arch/powerpc/configs/44x/canyonlands_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/ebony_defconfig b/arch/powerpc/configs/44x/ebony_defconfig
index fedd03f..07d77e5 100644
--- a/arch/powerpc/configs/44x/ebony_defconfig
+++ b/arch/powerpc/configs/44x/ebony_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/44x/eiger_defconfig b/arch/powerpc/configs/44x/eiger_defconfig
index ebff701..2ce7e9a 100644
--- a/arch/powerpc/configs/44x/eiger_defconfig
+++ b/arch/powerpc/configs/44x/eiger_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig
index 865e93f..18730ff 100644
--- a/arch/powerpc/configs/44x/icon_defconfig
+++ b/arch/powerpc/configs/44x/icon_defconfig
@@ -6,7 +6,7 @@
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index 8ece4c7..92f863a 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -7,7 +7,7 @@
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_PROFILING=y
diff --git a/arch/powerpc/configs/44x/katmai_defconfig b/arch/powerpc/configs/44x/katmai_defconfig
index 4ca9b48..34c0914 100644
--- a/arch/powerpc/configs/44x/katmai_defconfig
+++ b/arch/powerpc/configs/44x/katmai_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/rainier_defconfig b/arch/powerpc/configs/44x/rainier_defconfig
index e3b65d2..21c33fa 100644
--- a/arch/powerpc/configs/44x/rainier_defconfig
+++ b/arch/powerpc/configs/44x/rainier_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/redwood_defconfig b/arch/powerpc/configs/44x/redwood_defconfig
index 64cd0f3..01cc2b1 100644
--- a/arch/powerpc/configs/44x/redwood_defconfig
+++ b/arch/powerpc/configs/44x/redwood_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig
index 01d0336..dfcffed 100644
--- a/arch/powerpc/configs/44x/sam440ep_defconfig
+++ b/arch/powerpc/configs/44x/sam440ep_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/sequoia_defconfig b/arch/powerpc/configs/44x/sequoia_defconfig
index 89b2f96..47e399f 100644
--- a/arch/powerpc/configs/44x/sequoia_defconfig
+++ b/arch/powerpc/configs/44x/sequoia_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/taishan_defconfig b/arch/powerpc/configs/44x/taishan_defconfig
index e3386cf..a6a002e 100644
--- a/arch/powerpc/configs/44x/taishan_defconfig
+++ b/arch/powerpc/configs/44x/taishan_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index 9c13b9d..6cf9d66 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -8,7 +8,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig
index f234c4d..69b57da 100644
--- a/arch/powerpc/configs/52xx/cm5200_defconfig
+++ b/arch/powerpc/configs/52xx/cm5200_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
index a4a795c..f3638ae 100644
--- a/arch/powerpc/configs/52xx/lite5200b_defconfig
+++ b/arch/powerpc/configs/52xx/lite5200b_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index 20d53a1..6828eda 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 6bd5833..7f7e4a8 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -8,7 +8,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_VM_EVENT_COUNTERS is not set
 CONFIG_SLAB=y
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index 3a1f702..959cd2c 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_KALLSYMS is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/83xx/asp8347_defconfig b/arch/powerpc/configs/83xx/asp8347_defconfig
index eed42d8..d2762d9 100644
--- a/arch/powerpc/configs/83xx/asp8347_defconfig
+++ b/arch/powerpc/configs/83xx/asp8347_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/kmeter1_defconfig b/arch/powerpc/configs/83xx/kmeter1_defconfig
index e43ecb2..7a7b731 100644
--- a/arch/powerpc/configs/83xx/kmeter1_defconfig
+++ b/arch/powerpc/configs/83xx/kmeter1_defconfig
@@ -3,7 +3,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
index c2e6ab5..c683bce 100644
--- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
index 1d3b200..a721cd3 100644
--- a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
index 91fe73b..a5699a1 100644
--- a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
index 6d300f2..b4da1a7 100644
--- a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
index b236a67..291f822 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
index 001dead..f8b228a 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
index 9dccefc..99660c0 100644
--- a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
index d4b165d..10b5c4c 100644
--- a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
index 89ba672..45925d7 100644
--- a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
+++ b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
index 2ea6b40..f367985 100644
--- a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
index bffe3c7..414eda3 100644
--- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/83xx/sbc834x_defconfig b/arch/powerpc/configs/83xx/sbc834x_defconfig
index fa5c9ee..6d6463f 100644
--- a/arch/powerpc/configs/83xx/sbc834x_defconfig
+++ b/arch/powerpc/configs/83xx/sbc834x_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/85xx/ksi8560_defconfig b/arch/powerpc/configs/85xx/ksi8560_defconfig
index 385b1af..8f7c106 100644
--- a/arch/powerpc/configs/85xx/ksi8560_defconfig
+++ b/arch/powerpc/configs/85xx/ksi8560_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_KSI8560=y
 CONFIG_CPM2=y
diff --git a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
index 222b704..55e0725 100644
--- a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_MPC8540_ADS=y
 CONFIG_NO_HZ=y
diff --git a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
index 619702d..d724095 100644
--- a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
+++ b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_MPC8560_ADS=y
 CONFIG_BINFMT_MISC=y
diff --git a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
index 6bf56e8..4b44bea 100644
--- a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
+++ b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_MPC85xx_CDS=y
 CONFIG_NO_HZ=y
diff --git a/arch/powerpc/configs/85xx/sbc8548_defconfig b/arch/powerpc/configs/85xx/sbc8548_defconfig
index a9a17d0..5b2b651 100644
--- a/arch/powerpc/configs/85xx/sbc8548_defconfig
+++ b/arch/powerpc/configs/85xx/sbc8548_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_SBC8548=y
diff --git a/arch/powerpc/configs/85xx/sbc8560_defconfig b/arch/powerpc/configs/85xx/sbc8560_defconfig
index 820e32d..f7fdb03 100644
--- a/arch/powerpc/configs/85xx/sbc8560_defconfig
+++ b/arch/powerpc/configs/85xx/sbc8560_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_SBC8560=y
diff --git a/arch/powerpc/configs/85xx/socrates_defconfig b/arch/powerpc/configs/85xx/socrates_defconfig
index b6db3f4..77506b5 100644
--- a/arch/powerpc/configs/85xx/socrates_defconfig
+++ b/arch/powerpc/configs/85xx/socrates_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/85xx/stx_gp3_defconfig b/arch/powerpc/configs/85xx/stx_gp3_defconfig
index 333a41b..5d4db15 100644
--- a/arch/powerpc/configs/85xx/stx_gp3_defconfig
+++ b/arch/powerpc/configs/85xx/stx_gp3_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODVERSIONS=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/85xx/tqm8540_defconfig b/arch/powerpc/configs/85xx/tqm8540_defconfig
index 33db352..ddcb9f3 100644
--- a/arch/powerpc/configs/85xx/tqm8540_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8540_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/85xx/tqm8541_defconfig b/arch/powerpc/configs/85xx/tqm8541_defconfig
index f0c20df..981abd6 100644
--- a/arch/powerpc/configs/85xx/tqm8541_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8541_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/85xx/tqm8548_defconfig b/arch/powerpc/configs/85xx/tqm8548_defconfig
index a883450..37b3d72 100644
--- a/arch/powerpc/configs/85xx/tqm8548_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8548_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
diff --git a/arch/powerpc/configs/85xx/tqm8555_defconfig b/arch/powerpc/configs/85xx/tqm8555_defconfig
index ff95f90..3593b32 100644
--- a/arch/powerpc/configs/85xx/tqm8555_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8555_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/85xx/tqm8560_defconfig b/arch/powerpc/configs/85xx/tqm8560_defconfig
index 8d6c90e..de413ac 100644
--- a/arch/powerpc/configs/85xx/tqm8560_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8560_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_EPOLL is not set
diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
index f53efe4..5ea3124 100644
--- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
+++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
@@ -11,7 +11,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
index 432ebc2..4b24412 100644
--- a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
+++ b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
@@ -11,7 +11,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/86xx/gef_sbc310_defconfig b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
index ce5e919..a360ba4 100644
--- a/arch/powerpc/configs/86xx/gef_sbc310_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
@@ -11,7 +11,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
index 589e71e..be2829d 100644
--- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
@@ -11,7 +11,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
index 321fb47..036bfb2 100644
--- a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
+++ b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 # CONFIG_ELF_CORE is not set
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
index b5e4639..0c9c7ed 100644
--- a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
+++ b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
@@ -10,7 +10,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/86xx/sbc8641d_defconfig b/arch/powerpc/configs/86xx/sbc8641d_defconfig
index 71145c3..0a92ca0 100644
--- a/arch/powerpc/configs/86xx/sbc8641d_defconfig
+++ b/arch/powerpc/configs/86xx/sbc8641d_defconfig
@@ -11,7 +11,7 @@
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index ca84c7f..6912874 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_BASE_FULL is not set
diff --git a/arch/powerpc/configs/e55xx_smp_defconfig b/arch/powerpc/configs/e55xx_smp_defconfig
index 94d120e..06f9549 100644
--- a/arch/powerpc/configs/e55xx_smp_defconfig
+++ b/arch/powerpc/configs/e55xx_smp_defconfig
@@ -12,7 +12,7 @@
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/ep8248e_defconfig b/arch/powerpc/configs/ep8248e_defconfig
index 2677b08..fceffb3 100644
--- a/arch/powerpc/configs/ep8248e_defconfig
+++ b/arch/powerpc/configs/ep8248e_defconfig
@@ -2,7 +2,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_SLAB=y
 # CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index f9a3112..219fd47 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_BASE_FULL is not set
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
index fcf0a39..e74d3a4 100644
--- a/arch/powerpc/configs/gamecube_defconfig
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -6,7 +6,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_ELF_CORE is not set
 CONFIG_PERF_COUNTERS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/powerpc/configs/holly_defconfig b/arch/powerpc/configs/holly_defconfig
index b9b63a6..94ebfee 100644
--- a/arch/powerpc/configs/holly_defconfig
+++ b/arch/powerpc/configs/holly_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_PPC_CHRP is not set
diff --git a/arch/powerpc/configs/mgcoge_defconfig b/arch/powerpc/configs/mgcoge_defconfig
index c4ed255..39518e9 100644
--- a/arch/powerpc/configs/mgcoge_defconfig
+++ b/arch/powerpc/configs/mgcoge_defconfig
@@ -3,7 +3,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_SLAB=y
 # CONFIG_IOSCHED_CFQ is not set
diff --git a/arch/powerpc/configs/mgsuvd_defconfig b/arch/powerpc/configs/mgsuvd_defconfig
index f276c7c..2a49062 100644
--- a/arch/powerpc/configs/mgsuvd_defconfig
+++ b/arch/powerpc/configs/mgsuvd_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_BUG is not set
diff --git a/arch/powerpc/configs/mpc7448_hpc2_defconfig b/arch/powerpc/configs/mpc7448_hpc2_defconfig
index 3b94708..75f0bbf 100644
--- a/arch/powerpc/configs/mpc7448_hpc2_defconfig
+++ b/arch/powerpc/configs/mpc7448_hpc2_defconfig
@@ -4,7 +4,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_PPC_CHRP is not set
 # CONFIG_PPC_PMAC is not set
diff --git a/arch/powerpc/configs/mpc8272_ads_defconfig b/arch/powerpc/configs/mpc8272_ads_defconfig
index c7d68ff..6a22400 100644
--- a/arch/powerpc/configs/mpc8272_ads_defconfig
+++ b/arch/powerpc/configs/mpc8272_ads_defconfig
@@ -2,7 +2,7 @@
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_PPC_CHRP is not set
 # CONFIG_PPC_PMAC is not set
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 5b1b10f..5aac9a8 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -3,7 +3,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 3aeb594..99a19d1 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -10,7 +10,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index d62c801..c636f23 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -12,7 +12,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index 668215c..5c25882 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_HOTPLUG is not set
 # CONFIG_BUG is not set
diff --git a/arch/powerpc/configs/mpc86xx_defconfig b/arch/powerpc/configs/mpc86xx_defconfig
index 63b90d4..55b5431 100644
--- a/arch/powerpc/configs/mpc86xx_defconfig
+++ b/arch/powerpc/configs/mpc86xx_defconfig
@@ -10,7 +10,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index f9b8348..9e146cd 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -4,7 +4,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_BASE_FULL is not set
diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig
index 93d7425..bfd634b 100644
--- a/arch/powerpc/configs/ppc40x_defconfig
+++ b/arch/powerpc/configs/ppc40x_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index 2fa05f7..4713320 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/pq2fads_defconfig b/arch/powerpc/configs/pq2fads_defconfig
index a4353be..baad8db 100644
--- a/arch/powerpc/configs/pq2fads_defconfig
+++ b/arch/powerpc/configs/pq2fads_defconfig
@@ -3,7 +3,7 @@
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_PPC_CHRP is not set
 # CONFIG_PPC_PMAC is not set
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 49cffe0..caba919 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -8,7 +8,7 @@
 CONFIG_POSIX_MQUEUE=y
 CONFIG_NAMESPACES=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_KALLSYMS_EXTRA_PASS=y
 # CONFIG_PERF_EVENTS is not set
 # CONFIG_COMPAT_BRK is not set
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index f87f0e1..9c3f22c 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -2,7 +2,7 @@
 CONFIG_ALTIVEC=y
 CONFIG_VSX=y
 CONFIG_SMP=y
-CONFIG_NR_CPUS=128
+CONFIG_NR_CPUS=1024
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -45,6 +45,8 @@
 CONFIG_IRQ_ALL_CPUS=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_PPC_64K_PAGES=y
+CONFIG_PPC_SUBPAGE_PROT=y
 CONFIG_SCHED_SMT=y
 CONFIG_HOTPLUG_PCI=m
 CONFIG_HOTPLUG_PCI_RPA=m
@@ -184,6 +186,7 @@
 CONFIG_E1000=y
 CONFIG_E1000E=y
 CONFIG_TIGON3=y
+CONFIG_BNX2=m
 CONFIG_CHELSIO_T1=m
 CONFIG_CHELSIO_T3=m
 CONFIG_EHEA=y
@@ -311,9 +314,7 @@
 # CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_LATENCYTOP=y
 CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQSOFF_TRACER=y
 CONFIG_SCHED_TRACER=y
-CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_DEBUG_STACKOVERFLOW=y
 CONFIG_DEBUG_STACK_USAGE=y
diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig
index 4f0c10a..ebb2a66 100644
--- a/arch/powerpc/configs/storcenter_defconfig
+++ b/arch/powerpc/configs/storcenter_defconfig
@@ -1,7 +1,7 @@
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index d0a5b67..8616fde 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -5,7 +5,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_SYSCTL_SYSCALL is not set
 # CONFIG_ELF_CORE is not set
 # CONFIG_BASE_FULL is not set
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index bb8ba75..175295f 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -7,7 +7,7 @@
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_ELF_CORE is not set
 CONFIG_PERF_COUNTERS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 96a7d06..921a847 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -37,18 +37,21 @@
 	.align 2;					\
 label##3:
 
-#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)	\
-label##4:						\
-	.popsection;					\
-	.pushsection sect,"a";				\
-	.align 3;					\
-label##5:					       	\
-	FTR_ENTRY_LONG msk;				\
-	FTR_ENTRY_LONG val;				\
-	FTR_ENTRY_OFFSET label##1b-label##5b;		\
-	FTR_ENTRY_OFFSET label##2b-label##5b;	 	\
-	FTR_ENTRY_OFFSET label##3b-label##5b;		\
-	FTR_ENTRY_OFFSET label##4b-label##5b;	 	\
+#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)		\
+label##4:							\
+	.popsection;						\
+	.pushsection sect,"a";					\
+	.align 3;						\
+label##5:							\
+	FTR_ENTRY_LONG msk;					\
+	FTR_ENTRY_LONG val;					\
+	FTR_ENTRY_OFFSET label##1b-label##5b;			\
+	FTR_ENTRY_OFFSET label##2b-label##5b;			\
+	FTR_ENTRY_OFFSET label##3b-label##5b;			\
+	FTR_ENTRY_OFFSET label##4b-label##5b;			\
+	.ifgt (label##4b-label##3b)-(label##2b-label##1b);	\
+	.error "Feature section else case larger than body";	\
+	.endif;							\
 	.popsection;
 
 
diff --git a/arch/powerpc/include/asm/immap_qe.h b/arch/powerpc/include/asm/immap_qe.h
index 4e10f50..0edb684 100644
--- a/arch/powerpc/include/asm/immap_qe.h
+++ b/arch/powerpc/include/asm/immap_qe.h
@@ -467,13 +467,22 @@
 extern struct qe_immap __iomem *qe_immr;
 extern phys_addr_t get_qe_base(void);
 
-static inline unsigned long immrbar_virt_to_phys(void *address)
+/*
+ * Returns the offset within the QE address space of the given pointer.
+ *
+ * Note that the QE does not support 36-bit physical addresses, so if
+ * get_qe_base() returns a number above 4GB, the caller will probably fail.
+ */
+static inline phys_addr_t immrbar_virt_to_phys(void *address)
 {
-	if ( ((u32)address >= (u32)qe_immr) &&
-			((u32)address < ((u32)qe_immr + QE_IMMAP_SIZE)) )
-		return (unsigned long)(address - (u32)qe_immr +
-				(u32)get_qe_base());
-	return (unsigned long)virt_to_phys(address);
+	void *q = (void *)qe_immr;
+
+	/* Is it a MURAM address? */
+	if ((address >= q) && (address < (q + QE_IMMAP_SIZE)))
+		return get_qe_base() + (address - q);
+
+	/* It's an address returned by kmalloc */
+	return virt_to_phys(address);
 }
 
 #endif /* __KERNEL__ */
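For reference, a standalone sketch of the translation logic introduced above: addresses that fall inside the ioremapped QE window are turned into "QE base + offset", anything else is treated as a normal linear-mapped pointer. The window size, physical base and buffer below are stand-in values, not the kernel's qe_immr/QE_IMMAP_SIZE/get_qe_base() symbols.

#include <stdint.h>
#include <stdio.h>

#define QE_WINDOW_SIZE	0x10000ULL	/* stand-in for QE_IMMAP_SIZE */
#define QE_PHYS_BASE	0xe0100000ULL	/* stand-in for get_qe_base() */

static unsigned char qe_window_sim[QE_WINDOW_SIZE];	/* plays the role of qe_immr */

static uint64_t sim_virt_to_phys(void *address)
{
	unsigned char *p = address;
	unsigned char *q = qe_window_sim;

	/* Is it inside the QE (MURAM) window? */
	if (p >= q && p < q + QE_WINDOW_SIZE)
		return QE_PHYS_BASE + (uint64_t)(p - q);

	/* Otherwise pretend it is a linear-mapped kmalloc address */
	return (uint64_t)(uintptr_t)address;	/* stand-in for virt_to_phys() */
}

int main(void)
{
	printf("MURAM-style address -> %#llx\n",
	       (unsigned long long)sim_virt_to_phys(qe_window_sim + 0x200));
	return 0;
}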
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index b85d8dd..b0b06d8 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -12,24 +12,44 @@
 
 #else
 #ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_IRQSOFF_TRACER
+/*
+ * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
+ * which is the stack frame here, we need to force a stack frame
+ * in case we came from user space.
+ */
+#define TRACE_WITH_FRAME_BUFFER(func)		\
+	mflr	r0;				\
+	stdu	r1, -32(r1);			\
+	std	r0, 16(r1);			\
+	stdu	r1, -32(r1);			\
+	bl func;				\
+	ld	r1, 0(r1);			\
+	ld	r1, 0(r1);
+#else
+#define TRACE_WITH_FRAME_BUFFER(func)		\
+	bl func;
+#endif
+
 /*
  * Most of the CPU's IRQ-state tracing is done from assembly code; we
  * have to call a C function so call a wrapper that saves all the
  * C-clobbered registers.
  */
-#define TRACE_ENABLE_INTS	bl .trace_hardirqs_on
-#define TRACE_DISABLE_INTS	bl .trace_hardirqs_off
-#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip)	\
-	cmpdi	en,0;				\
-	bne	95f;				\
-	stb	en,PACASOFTIRQEN(r13);		\
-	bl	.trace_hardirqs_off;		\
-	b	skip;				\
-95:	bl	.trace_hardirqs_on;		\
+#define TRACE_ENABLE_INTS	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on)
+#define TRACE_DISABLE_INTS	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)
+
+#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip)		\
+	cmpdi	en,0;					\
+	bne	95f;					\
+	stb	en,PACASOFTIRQEN(r13);			\
+	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)	\
+	b	skip;					\
+95:	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on)	\
 	li	en,1;
 #define TRACE_AND_RESTORE_IRQ(en)		\
 	TRACE_AND_RESTORE_IRQ_PARTIAL(en,96f);	\
-	stb	en,PACASOFTIRQEN(r13);	        \
+	stb	en,PACASOFTIRQEN(r13);		\
 96:
 #else
 #define TRACE_ENABLE_INTS
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 8433d36..991d599 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -116,9 +116,6 @@
 	 * If for some reason there is no irq, but the interrupt
 	 * shouldn't be counted as spurious, return NO_IRQ_IGNORE. */
 	unsigned int	(*get_irq)(void);
-#ifdef CONFIG_KEXEC
-	void		(*kexec_cpu_down)(int crash_shutdown, int secondary);
-#endif
 
 	/* PCI stuff */
 	/* Called after scanning the bus, before allocating resources */
@@ -235,11 +232,7 @@
 	void (*machine_shutdown)(void);
 
 #ifdef CONFIG_KEXEC
-	/* Called to do the minimal shutdown needed to run a kexec'd kernel
-	 * to run successfully.
-	 * XXX Should we move this one out of kexec scope?
-	 */
-	void (*machine_crash_shutdown)(struct pt_regs *regs);
+	void (*kexec_cpu_down)(int crash_shutdown, int secondary);
 
 	/* Called to do what every setup is needed on image and the
 	 * reboot code buffer. Returns 0 on success.
@@ -247,15 +240,6 @@
 	 * claims to support kexec.
 	 */
 	int (*machine_kexec_prepare)(struct kimage *image);
-
-	/* Called to handle any machine specific cleanup on image */
-	void (*machine_kexec_cleanup)(struct kimage *image);
-
-	/* Called to perform the _real_ kexec.
-	 * Do NOT allocate memory or fail here. We are past the point of
-	 * no return.
-	 */
-	void (*machine_kexec)(struct kimage *image);
 #endif /* CONFIG_KEXEC */
 
 #ifdef CONFIG_SUSPEND
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 8eaed81..17194fc 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,8 +40,8 @@
 
 /* MAS registers bit definitions */
 
-#define MAS0_TLBSEL(x)		((x << 28) & 0x30000000)
-#define MAS0_ESEL(x)		((x << 16) & 0x0FFF0000)
+#define MAS0_TLBSEL(x)		(((x) << 28) & 0x30000000)
+#define MAS0_ESEL(x)		(((x) << 16) & 0x0FFF0000)
 #define MAS0_NV(x)		((x) & 0x00000FFF)
 #define MAS0_HES		0x00004000
 #define MAS0_WQ_ALLWAYS		0x00000000
@@ -50,12 +50,12 @@
 
 #define MAS1_VALID		0x80000000
 #define MAS1_IPROT		0x40000000
-#define MAS1_TID(x)		((x << 16) & 0x3FFF0000)
+#define MAS1_TID(x)		(((x) << 16) & 0x3FFF0000)
 #define MAS1_IND		0x00002000
 #define MAS1_TS			0x00001000
 #define MAS1_TSIZE_MASK		0x00000f80
 #define MAS1_TSIZE_SHIFT	7
-#define MAS1_TSIZE(x)		((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
+#define MAS1_TSIZE(x)		(((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
 
 #define MAS2_EPN		0xFFFFF000
 #define MAS2_X0			0x00000040
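The extra parentheses around the macro argument matter because '<<' binds tighter than '&'. A small sketch of the trap, using a stand-in ESEL_OLD for the pre-fix definition (not a kernel symbol):

#include <stdio.h>

#define ESEL_OLD(x)	((x << 16) & 0x0FFF0000)	/* old, unparenthesised argument */
#define ESEL_NEW(x)	(((x) << 16) & 0x0FFF0000)	/* fixed form, as above */

int main(void)
{
	unsigned int way = 0x13;

	/*
	 * ESEL_OLD(way & 0x3) expands to ((way & 0x3 << 16) & ...), i.e.
	 * way & (0x3 << 16), which is 0 here; the fixed form yields 0x30000.
	 */
	printf("old: %#x\n", ESEL_OLD(way & 0x3));
	printf("new: %#x\n", ESEL_NEW(way & 0x3));
	return 0;
}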
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 53b64be..da4b200 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -101,7 +101,7 @@
 
 #ifdef CONFIG_FLATMEM
 #define ARCH_PFN_OFFSET		(MEMORY_START >> PAGE_SHIFT)
-#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr))
+#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
 #endif
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index ff0005eec..125fc1a 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -283,6 +283,7 @@
 #define HID0_NOPTI	(1<<0)		/* No-op dcbt and dcbst instr. */
 
 #define SPRN_HID1	0x3F1		/* Hardware Implementation Register 1 */
+#ifdef CONFIG_6xx
 #define HID1_EMCP	(1<<31)		/* 7450 Machine Check Pin Enable */
 #define HID1_DFS	(1<<22)		/* 7447A Dynamic Frequency Scaling */
 #define HID1_PC0	(1<<16)		/* 7450 PLL_CFG[0] */
@@ -292,6 +293,7 @@
 #define HID1_SYNCBE	(1<<11)		/* 7450 ABE for sync, eieio */
 #define HID1_ABE	(1<<10)		/* 7450 Address Broadcast Enable */
 #define HID1_PS		(1<<16)		/* 750FX PLL selection */
+#endif
 #define SPRN_HID2	0x3F8		/* Hardware Implementation Register 2 */
 #define SPRN_HID2_GEKKO	0x398		/* Gekko HID2 Register */
 #define SPRN_IABR	0x3F2	/* Instruction Address Breakpoint Register */
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 667a498..e68c69b 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -246,6 +246,20 @@
 					store or cache line push */
 #endif
 
+/* Bit definitions for the HID1 */
+#ifdef CONFIG_E500
+/* e500v1/v2 */
+#define HID1_PLL_CFG_MASK 0xfc000000	/* PLL_CFG input pins */
+#define HID1_RFXE	0x00020000	/* Read fault exception enable */
+#define HID1_R1DPE	0x00008000	/* R1 data bus parity enable */
+#define HID1_R2DPE	0x00004000	/* R2 data bus parity enable */
+#define HID1_ASTME	0x00002000	/* Address bus streaming mode enable */
+#define HID1_ABE	0x00001000	/* Address broadcast enable */
+#define HID1_MPXTT	0x00000400	/* MPX re-map transfer type */
+#define HID1_ATS	0x00000080	/* Atomic status */
+#define HID1_MID_MASK	0x0000000f	/* MID input pins */
+#endif
+
 /* Bit definitions for the DBSR. */
 /*
  * DBSR bits which have conflicting definitions on true Book E versus IBM 40x.
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 0ab8d869..0c8b35d 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -203,14 +203,6 @@
 void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
 		void *code, int code_size);
 
-#ifdef CONFIG_KEXEC
-void crash_register_spus(struct list_head *list);
-#else
-static inline void crash_register_spus(struct list_head *list)
-{
-}
-#endif
-
 extern void spu_invalidate_slbs(struct spu *spu);
 extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm);
 int spu_64k_pages_available(void);
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index 55cba4a..f8cd9fb 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -18,7 +18,7 @@
 #include <asm/mmu.h>
 
 _GLOBAL(__setup_cpu_603)
-	mflr	r4
+	mflr	r5
 BEGIN_MMU_FTR_SECTION
 	li	r10,0
 	mtspr	SPRN_SPRG_603_LRU,r10		/* init SW LRU tracking */
@@ -27,60 +27,60 @@
 	bl	__init_fpu_registers
 END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
 	bl	setup_common_caches
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_604)
-	mflr	r4
+	mflr	r5
 	bl	setup_common_caches
 	bl	setup_604_hid0
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_750)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_750cx)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	bl	setup_750cx
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_750fx)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	bl	setup_750fx
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_7400)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_7400_workarounds
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_7410)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_7410_workarounds
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	li	r3,0
 	mtspr	SPRN_L2CR2,r3
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_745x)
-	mflr	r4
+	mflr	r5
 	bl	setup_common_caches
 	bl	setup_745x_specifics
-	mtlr	r4
+	mtlr	r5
 	blr
 
 /* Enable caches for 603's, 604, 750 & 7400 */
@@ -194,10 +194,10 @@
 	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
 	cror	4*cr0+eq,4*cr0+eq,4*cr2+eq
 	bnelr
-	lwz	r6,CPU_SPEC_FEATURES(r5)
+	lwz	r6,CPU_SPEC_FEATURES(r4)
 	li	r7,CPU_FTR_CAN_NAP
 	andc	r6,r6,r7
-	stw	r6,CPU_SPEC_FEATURES(r5)
+	stw	r6,CPU_SPEC_FEATURES(r4)
 	blr
 
 /* 750fx specific
@@ -225,12 +225,12 @@
 	andis.	r11,r11,L3CR_L3E@h
 	beq	1f
 END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
-	lwz	r6,CPU_SPEC_FEATURES(r5)
+	lwz	r6,CPU_SPEC_FEATURES(r4)
 	andi.	r0,r6,CPU_FTR_L3_DISABLE_NAP
 	beq	1f
 	li	r7,CPU_FTR_CAN_NAP
 	andc	r6,r6,r7
-	stw	r6,CPU_SPEC_FEATURES(r5)
+	stw	r6,CPU_SPEC_FEATURES(r4)
 1:
 	mfspr	r11,SPRN_HID0
 
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 894e64f..5c518ad 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -64,6 +64,12 @@
 	bl	__e500_icache_setup
 	bl	__e500_dcache_setup
 	bl	__setup_e500_ivors
+#ifdef CONFIG_RAPIDIO
+	/* Ensure that RFXE is set */
+	mfspr	r3,SPRN_HID1
+	oris	r3,r3,HID1_RFXE@h
+	mtspr	SPRN_HID1,r3
+#endif
 	mtlr	r4
 	blr
 _GLOBAL(__setup_cpu_e500mc)
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index be5ab18..e8e915c 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -116,7 +116,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/power3",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "power3",
 	},
 	{	/* Power3+ */
@@ -132,7 +131,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/power3",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "power3",
 	},
 	{	/* Northstar */
@@ -148,7 +146,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/rs64",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "rs64",
 	},
 	{	/* Pulsar */
@@ -164,7 +161,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/rs64",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "rs64",
 	},
 	{	/* I-star */
@@ -180,7 +176,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/rs64",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "rs64",
 	},
 	{	/* S-star */
@@ -196,7 +191,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/rs64",
 		.oprofile_type		= PPC_OPROFILE_RS64,
-		.machine_check		= machine_check_generic,
 		.platform		= "rs64",
 	},
 	{	/* Power4 */
@@ -212,7 +206,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/power4",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "power4",
 	},
 	{	/* Power4+ */
@@ -228,7 +221,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/power4",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "power4",
 	},
 	{	/* PPC970 */
@@ -247,7 +239,6 @@
 		.cpu_restore		= __restore_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc970",
 	},
 	{	/* PPC970FX */
@@ -266,7 +257,6 @@
 		.cpu_restore		= __restore_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc970",
 	},
 	{	/* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */
@@ -285,7 +275,6 @@
 		.cpu_restore		= __restore_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970MP",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc970",
 	},
 	{	/* PPC970MP */
@@ -304,7 +293,6 @@
 		.cpu_restore		= __restore_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970MP",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc970",
 	},
 	{	/* PPC970GX */
@@ -322,7 +310,6 @@
 		.cpu_setup		= __setup_cpu_ppc970,
 		.oprofile_cpu_type	= "ppc64/970",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc970",
 	},
 	{	/* Power5 GR */
@@ -343,7 +330,6 @@
 		 */
 		.oprofile_mmcra_sihv	= MMCRA_SIHV,
 		.oprofile_mmcra_sipr	= MMCRA_SIPR,
-		.machine_check		= machine_check_generic,
 		.platform		= "power5",
 	},
 	{	/* Power5++ */
@@ -360,7 +346,6 @@
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.oprofile_mmcra_sihv	= MMCRA_SIHV,
 		.oprofile_mmcra_sipr	= MMCRA_SIPR,
-		.machine_check		= machine_check_generic,
 		.platform		= "power5+",
 	},
 	{	/* Power5 GS */
@@ -378,7 +363,6 @@
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.oprofile_mmcra_sihv	= MMCRA_SIHV,
 		.oprofile_mmcra_sipr	= MMCRA_SIPR,
-		.machine_check		= machine_check_generic,
 		.platform		= "power5+",
 	},
 	{	/* POWER6 in P5+ mode; 2.04-compliant processor */
@@ -390,7 +374,6 @@
 		.mmu_features		= MMU_FTR_HPTE_TABLE,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
-		.machine_check		= machine_check_generic,
 		.oprofile_cpu_type	= "ppc64/ibm-compat-v1",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.platform		= "power5+",
@@ -413,7 +396,6 @@
 		.oprofile_mmcra_sipr	= POWER6_MMCRA_SIPR,
 		.oprofile_mmcra_clear	= POWER6_MMCRA_THRM |
 			POWER6_MMCRA_OTHER,
-		.machine_check		= machine_check_generic,
 		.platform		= "power6x",
 	},
 	{	/* 2.05-compliant processor, i.e. Power6 "architected" mode */
@@ -425,7 +407,6 @@
 		.mmu_features		= MMU_FTR_HPTE_TABLE,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
-		.machine_check		= machine_check_generic,
 		.oprofile_cpu_type	= "ppc64/ibm-compat-v1",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.platform		= "power6",
@@ -440,7 +421,6 @@
 			MMU_FTR_TLBIE_206,
 		.icache_bsize		= 128,
 		.dcache_bsize		= 128,
-		.machine_check		= machine_check_generic,
 		.oprofile_type		= PPC_OPROFILE_POWER4,
 		.oprofile_cpu_type	= "ppc64/ibm-compat-v1",
 		.platform		= "power7",
@@ -492,7 +472,6 @@
 		.pmc_type		= PPC_PMC_IBM,
 		.oprofile_cpu_type	= "ppc64/cell-be",
 		.oprofile_type		= PPC_OPROFILE_CELL,
-		.machine_check		= machine_check_generic,
 		.platform		= "ppc-cell-be",
 	},
 	{	/* PA Semi PA6T */
@@ -510,7 +489,6 @@
 		.cpu_restore		= __restore_cpu_pa6t,
 		.oprofile_cpu_type	= "ppc64/pa6t",
 		.oprofile_type		= PPC_OPROFILE_PA6T,
-		.machine_check		= machine_check_generic,
 		.platform		= "pa6t",
 	},
 	{	/* default match */
@@ -524,7 +502,6 @@
 		.dcache_bsize		= 128,
 		.num_pmcs		= 6,
 		.pmc_type		= PPC_PMC_IBM,
-		.machine_check		= machine_check_generic,
 		.platform		= "power4",
 	}
 #endif	/* CONFIG_PPC_BOOK3S_64 */
@@ -2099,8 +2076,8 @@
 	 * pointer on ppc64 and booke as we are running at 0 in real mode
 	 * on ppc64 and reloc_offset is always 0 on booke.
 	 */
-	if (s->cpu_setup) {
-		s->cpu_setup(offset, s);
+	if (t->cpu_setup) {
+		t->cpu_setup(offset, t);
 	}
 #endif /* CONFIG_PPC64 || CONFIG_BOOKE */
 }
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 832c8c4..3d569e2 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -48,7 +48,7 @@
 static cpumask_t cpus_in_crash = CPU_MASK_NONE;
 cpumask_t cpus_in_sr = CPU_MASK_NONE;
 
-#define CRASH_HANDLER_MAX 2
+#define CRASH_HANDLER_MAX 3
 /* NULL terminated list of shutdown handles */
 static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
 static DEFINE_SPINLOCK(crash_handlers_lock);
@@ -125,7 +125,7 @@
 	smp_wmb();
 
 	/*
-	 * FIXME: Until we will have the way to stop other CPUSs reliabally,
+	 * FIXME: Until we will have the way to stop other CPUs reliably,
 	 * the crash CPU will send an IPI and wait for other CPUs to
 	 * respond.
 	 * Delay of at least 10 seconds.
@@ -254,72 +254,6 @@
 	cpus_in_sr = CPU_MASK_NONE;
 }
 #endif
-#ifdef CONFIG_SPU_BASE
-
-#include <asm/spu.h>
-#include <asm/spu_priv1.h>
-
-struct crash_spu_info {
-	struct spu *spu;
-	u32 saved_spu_runcntl_RW;
-	u32 saved_spu_status_R;
-	u32 saved_spu_npc_RW;
-	u64 saved_mfc_sr1_RW;
-	u64 saved_mfc_dar;
-	u64 saved_mfc_dsisr;
-};
-
-#define CRASH_NUM_SPUS	16	/* Enough for current hardware */
-static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];
-
-static void crash_kexec_stop_spus(void)
-{
-	struct spu *spu;
-	int i;
-	u64 tmp;
-
-	for (i = 0; i < CRASH_NUM_SPUS; i++) {
-		if (!crash_spu_info[i].spu)
-			continue;
-
-		spu = crash_spu_info[i].spu;
-
-		crash_spu_info[i].saved_spu_runcntl_RW =
-			in_be32(&spu->problem->spu_runcntl_RW);
-		crash_spu_info[i].saved_spu_status_R =
-			in_be32(&spu->problem->spu_status_R);
-		crash_spu_info[i].saved_spu_npc_RW =
-			in_be32(&spu->problem->spu_npc_RW);
-
-		crash_spu_info[i].saved_mfc_dar    = spu_mfc_dar_get(spu);
-		crash_spu_info[i].saved_mfc_dsisr  = spu_mfc_dsisr_get(spu);
-		tmp = spu_mfc_sr1_get(spu);
-		crash_spu_info[i].saved_mfc_sr1_RW = tmp;
-
-		tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
-		spu_mfc_sr1_set(spu, tmp);
-
-		__delay(200);
-	}
-}
-
-void crash_register_spus(struct list_head *list)
-{
-	struct spu *spu;
-
-	list_for_each_entry(spu, list, full_list) {
-		if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
-			continue;
-
-		crash_spu_info[spu->number].spu = spu;
-	}
-}
-
-#else
-static inline void crash_kexec_stop_spus(void)
-{
-}
-#endif /* CONFIG_SPU_BASE */
 
 /*
  * Register a function to be called on shutdown.  Only use this if you
@@ -439,8 +373,6 @@
 	crash_shutdown_cpu = -1;
 	__debugger_fault_handler = old_handler;
 
-	crash_kexec_stop_spus();
-
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(1, 0);
 }
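With the SPU-specific code gone from crash.c, platform code is expected to hook the shutdown-handle list instead. A minimal sketch of such a hook, assuming the crash_shutdown_register()/crash_shutdown_unregister() helpers declared in <asm/kexec.h>; my_quiesce_hw() is hypothetical:

#include <linux/init.h>
#include <asm/kexec.h>

static void my_quiesce_hw(void)
{
	/* stop DMA engines, park coprocessors, etc. -- must not sleep */
}

static int __init my_crash_hook_init(void)
{
	/* adds my_quiesce_hw to the crash_shutdown_handles[] list above */
	return crash_shutdown_register(my_quiesce_hw);
}
arch_initcall(my_crash_hook_init);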
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index c22dc1e..56212bc 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -880,7 +880,18 @@
 	 */
 	andi.	r10,r9,MSR_EE
 	beq	1f
+	/*
+	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
+	 * which is the stack frame here, we need to force a stack frame
+	 * in case we came from user space.
+	 */
+	stwu	r1,-32(r1)
+	mflr	r0
+	stw	r0,4(r1)
+	stwu	r1,-32(r1)
 	bl	trace_hardirqs_on
+	lwz	r1,0(r1)
+	lwz	r1,0(r1)
 	lwz	r9,_MSR(r1)
 1:
 #endif /* CONFIG_TRACE_IRQFLAGS */
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index df7e20c..49a170a 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -15,6 +15,7 @@
 #include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/irq.h>
+#include <linux/ftrace.h>
 
 #include <asm/machdep.h>
 #include <asm/prom.h>
@@ -44,10 +45,7 @@
 
 void machine_crash_shutdown(struct pt_regs *regs)
 {
-	if (ppc_md.machine_crash_shutdown)
-		ppc_md.machine_crash_shutdown(regs);
-	else
-		default_machine_crash_shutdown(regs);
+	default_machine_crash_shutdown(regs);
 }
 
 /*
@@ -65,8 +63,6 @@
 
 void machine_kexec_cleanup(struct kimage *image)
 {
-	if (ppc_md.machine_kexec_cleanup)
-		ppc_md.machine_kexec_cleanup(image);
 }
 
 void arch_crash_save_vmcoreinfo(void)
@@ -87,10 +83,13 @@
  */
 void machine_kexec(struct kimage *image)
 {
-	if (ppc_md.machine_kexec)
-		ppc_md.machine_kexec(image);
-	else
-		default_machine_kexec(image);
+	int save_ftrace_enabled;
+
+	save_ftrace_enabled = __ftrace_enabled_save();
+
+	default_machine_kexec(image);
+
+	__ftrace_enabled_restore(save_ftrace_enabled);
 
 	/* Fall back to normal restart if we're still alive. */
 	machine_restart(NULL);
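The hunk above wraps the actual kexec in a save/disable/restore of ftrace so the function tracer cannot fire while the relocation code runs. The same pattern, in generic form and assuming only the __ftrace_enabled_save()/__ftrace_enabled_restore() helpers from <linux/ftrace.h> (do_critical_copy() is hypothetical):

#include <linux/ftrace.h>

static void do_critical_copy(void)
{
	/* hypothetical: code that must not call into the tracer */
}

static void run_untraced(void)
{
	int saved = __ftrace_enabled_save();	/* disables ftrace, returns old state */

	do_critical_copy();

	__ftrace_enabled_restore(saved);	/* put the previous state back */
}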
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index 4dcf5f8..b0dc8f7 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -596,6 +596,7 @@
 			if (left <= 0)
 				left = period;
 			record = 1;
+			event->hw.last_period = event->hw.sample_period;
 		}
 		if (left < 0x80000000LL)
 			val = 0x80000000LL - left;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 84906d3..7a1d5cb 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -631,7 +631,7 @@
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
 #else
-		printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
+		printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
 #endif
 	printk("TASK = %p[%d] '%s' THREAD: %p",
 	       current, task_pid_nr(current), current->comm, task_thread_info(current));
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 2b442e6..bf5f5ce 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -256,31 +256,16 @@
 	struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
 	struct rtas_update_flash_t *uf;
 	char msg[RTAS_MSG_MAXLEN];
-	int msglen;
 
-	uf = (struct rtas_update_flash_t *) dp->data;
+	uf = dp->data;
 
 	if (!strcmp(dp->name, FIRMWARE_FLASH_NAME)) {
 		get_flash_status_msg(uf->status, msg);
 	} else {	   /* FIRMWARE_UPDATE_NAME */
 		sprintf(msg, "%d\n", uf->status);
 	}
-	msglen = strlen(msg);
-	if (msglen > count)
-		msglen = count;
 
-	if (ppos && *ppos != 0)
-		return 0;	/* be cheap */
-
-	if (!access_ok(VERIFY_WRITE, buf, msglen))
-		return -EINVAL;
-
-	if (copy_to_user(buf, msg, msglen))
-		return -EFAULT;
-
-	if (ppos)
-		*ppos = msglen;
-	return msglen;
+	return simple_read_from_buffer(buf, count, ppos, msg, strlen(msg));
 }
 
 /* constructor for flash_block_cache */
@@ -394,26 +379,13 @@
 	char msg[RTAS_MSG_MAXLEN];
 	int msglen;
 
-	args_buf = (struct rtas_manage_flash_t *) dp->data;
+	args_buf = dp->data;
 	if (args_buf == NULL)
 		return 0;
 
 	msglen = sprintf(msg, "%d\n", args_buf->status);
-	if (msglen > count)
-		msglen = count;
 
-	if (ppos && *ppos != 0)
-		return 0;	/* be cheap */
-
-	if (!access_ok(VERIFY_WRITE, buf, msglen))
-		return -EINVAL;
-
-	if (copy_to_user(buf, msg, msglen))
-		return -EFAULT;
-
-	if (ppos)
-		*ppos = msglen;
-	return msglen;
+	return simple_read_from_buffer(buf, count, ppos, msg, msglen);
 }
 
 static ssize_t manage_flash_write(struct file *file, const char __user *buf,
@@ -495,24 +467,11 @@
 	char msg[RTAS_MSG_MAXLEN];
 	int msglen;
 
-	args_buf = (struct rtas_validate_flash_t *) dp->data;
+	args_buf = dp->data;
 
-	if (ppos && *ppos != 0)
-		return 0;	/* be cheap */
-	
 	msglen = get_validate_flash_msg(args_buf, msg);
-	if (msglen > count)
-		msglen = count;
 
-	if (!access_ok(VERIFY_WRITE, buf, msglen))
-		return -EINVAL;
-
-	if (copy_to_user(buf, msg, msglen))
-		return -EFAULT;
-
-	if (ppos)
-		*ppos = msglen;
-	return msglen;
+	return simple_read_from_buffer(buf, count, ppos, msg, msglen);
 }
 
 static ssize_t validate_flash_write(struct file *file, const char __user *buf,
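The three read handlers above drop the hand-rolled *ppos/access_ok/copy_to_user bookkeeping in favour of simple_read_from_buffer(), which clamps the copy to count, checks the offset and advances *ppos itself. A minimal read handler in that style (the status variable and handler name are hypothetical):

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static int my_status;	/* hypothetical driver state */

static ssize_t my_status_read(struct file *file, char __user *buf,
			      size_t count, loff_t *ppos)
{
	char msg[16];
	int len = scnprintf(msg, sizeof(msg), "%d\n", my_status);

	/* handles short reads, EOF at *ppos >= len, and updates *ppos */
	return simple_read_from_buffer(buf, count, ppos, msg, len);
}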
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 0438f81..049dbec 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -160,7 +160,7 @@
 	/* rtas fixed header */
 	len = 8;
 	err = (struct rtas_error_log *)buf;
-	if (err->extended_log_length) {
+	if (err->extended && err->extended_log_length) {
 
 		/* extended header */
 		len += err->extended_log_length;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 09e4dea..09d31db 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -265,11 +265,26 @@
 {
 	u64 sst, ust;
 
-	sst = scan_dispatch_log(get_paca()->starttime_user);
-	ust = scan_dispatch_log(get_paca()->starttime);
-	get_paca()->system_time -= sst;
-	get_paca()->user_time -= ust;
-	get_paca()->stolen_time += ust + sst;
+	u8 save_soft_enabled = local_paca->soft_enabled;
+	u8 save_hard_enabled = local_paca->hard_enabled;
+
+	/* We are called early in the exception entry, before
+	 * soft/hard_enabled are sync'ed to the expected state
+	 * for the exception. We are hard disabled but the PACA
+	 * needs to reflect that so various debug stuff doesn't
+	 * complain
+	 */
+	local_paca->soft_enabled = 0;
+	local_paca->hard_enabled = 0;
+
+	sst = scan_dispatch_log(local_paca->starttime_user);
+	ust = scan_dispatch_log(local_paca->starttime);
+	local_paca->system_time -= sst;
+	local_paca->user_time -= ust;
+	local_paca->stolen_time += ust + sst;
+
+	local_paca->soft_enabled = save_soft_enabled;
+	local_paca->hard_enabled = save_hard_enabled;
 }
 
 static inline u64 calculate_stolen_time(u64 stop_tb)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 1b2cdc8..bd74fac 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -626,12 +626,6 @@
 	if (recover > 0)
 		return;
 
-	if (user_mode(regs)) {
-		regs->msr |= MSR_RI;
-		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
-		return;
-	}
-
 #if defined(CONFIG_8xx) && defined(CONFIG_PCI)
 	/* the qspan pci read routines can cause machine checks -- Cort
 	 *
@@ -643,16 +637,12 @@
 	return;
 #endif
 
-	if (debugger_fault_handler(regs)) {
-		regs->msr |= MSR_RI;
+	if (debugger_fault_handler(regs))
 		return;
-	}
 
 	if (check_io_access(regs))
 		return;
 
-	if (debugger_fault_handler(regs))
-		return;
 	die("Machine check", regs, SIGBUS);
 
 	/* Must die if the interrupt is not recoverable */
diff --git a/arch/powerpc/lib/feature-fixups-test.S b/arch/powerpc/lib/feature-fixups-test.S
index cb73748..f461311 100644
--- a/arch/powerpc/lib/feature-fixups-test.S
+++ b/arch/powerpc/lib/feature-fixups-test.S
@@ -172,6 +172,25 @@
 3:	or	3,3,3
 
 
+#if 0
+/* Test that if we have a larger else case the assembler spots it and
+ * reports an error. #if 0'ed so as not to break the build normally.
+ */
+ftr_fixup_test7:
+	or	1,1,1
+BEGIN_FTR_SECTION
+	or	2,2,2
+	or	2,2,2
+	or	2,2,2
+FTR_SECTION_ELSE
+	or	3,3,3
+	or	3,3,3
+	or	3,3,3
+	or	3,3,3
+ALT_FTR_SECTION_END(0, 1)
+	or	1,1,1
+#endif
+
 #define	MAKE_MACRO_TEST(TYPE)						\
 globl(ftr_fixup_test_ ##TYPE##_macros)					\
 	or	1,1,1;							\
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index bf5cb91..fd48123 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -186,7 +186,7 @@
 	dbg("removing cpu %lu from node %d\n", cpu, node);
 
 	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
-		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
+		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
 	} else {
 		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
 		       cpu, node);
@@ -1289,10 +1289,9 @@
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-/* Vrtual Processor Home Node (VPHN) support */
+/* Virtual Processor Home Node (VPHN) support */
 #ifdef CONFIG_PPC_SPLPAR
-#define VPHN_NR_CHANGE_CTRS (8)
-static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS];
+static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
 static cpumask_t cpu_associativity_changes_mask;
 static int vphn_enabled;
 static void set_topology_timer(void);
@@ -1303,16 +1302,18 @@
  */
 static void setup_cpu_associativity_change_counters(void)
 {
-	int cpu = 0;
+	int cpu;
+
+	/* The VPHN feature supports a maximum of 8 reference points */
+	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
 
 	for_each_possible_cpu(cpu) {
-		int i = 0;
+		int i;
 		u8 *counts = vphn_cpu_change_counts[cpu];
 		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
 
-		for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
+		for (i = 0; i < distance_ref_points_depth; i++)
 			counts[i] = hypervisor_counts[i];
-		}
 	}
 }
 
@@ -1329,7 +1330,7 @@
  */
 static int update_cpu_associativity_changes_mask(void)
 {
-	int cpu = 0, nr_cpus = 0;
+	int cpu, nr_cpus = 0;
 	cpumask_t *changes = &cpu_associativity_changes_mask;
 
 	cpumask_clear(changes);
@@ -1339,8 +1340,8 @@
 		u8 *counts = vphn_cpu_change_counts[cpu];
 		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
 
-		for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
-			if (hypervisor_counts[i] > counts[i]) {
+		for (i = 0; i < distance_ref_points_depth; i++) {
+			if (hypervisor_counts[i] != counts[i]) {
 				counts[i] = hypervisor_counts[i];
 				changed = 1;
 			}
@@ -1354,8 +1355,11 @@
 	return nr_cpus;
 }
 
-/* 6 64-bit registers unpacked into 12 32-bit associativity values */
-#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32))
+/*
+ * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
+ * the complete property we have to add the length in the first cell.
+ */
+#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
 
 /*
  * Convert the associativity domain numbers returned from the hypervisor
@@ -1363,15 +1367,14 @@
  */
 static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
 {
-	int i = 0;
-	int nr_assoc_doms = 0;
+	int i, nr_assoc_doms = 0;
 	const u16 *field = (const u16*) packed;
 
 #define VPHN_FIELD_UNUSED	(0xffff)
 #define VPHN_FIELD_MSB		(0x8000)
 #define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)
 
-	for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) {
+	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
 		if (*field == VPHN_FIELD_UNUSED) {
 			/* All significant fields processed, and remaining
 			 * fields contain the reserved value of all 1's.
@@ -1379,14 +1382,12 @@
 			 */
 			unpacked[i] = *((u32*)field);
 			field += 2;
-		}
-		else if (*field & VPHN_FIELD_MSB) {
+		} else if (*field & VPHN_FIELD_MSB) {
 			/* Data is in the lower 15 bits of this field */
 			unpacked[i] = *field & VPHN_FIELD_MASK;
 			field++;
 			nr_assoc_doms++;
-		}
-		else {
+		} else {
 			/* Data is in the lower 15 bits of this field
 			 * concatenated with the next 16 bit field
 			 */
@@ -1396,6 +1397,9 @@
 		}
 	}
 
+	/* The first cell contains the length of the property */
+	unpacked[0] = nr_assoc_doms;
+
 	return nr_assoc_doms;
 }
 
@@ -1405,7 +1409,7 @@
  */
 static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
 {
-	long rc = 0;
+	long rc;
 	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
 	u64 flags = 1;
 	int hwcpu = get_hard_smp_processor_id(cpu);
@@ -1419,7 +1423,7 @@
 static long vphn_get_associativity(unsigned long cpu,
 					unsigned int *associativity)
 {
-	long rc = 0;
+	long rc;
 
 	rc = hcall_vphn(cpu, associativity);
 
@@ -1445,9 +1449,9 @@
  */
 int arch_update_cpu_topology(void)
 {
-	int cpu = 0, nid = 0, old_nid = 0;
+	int cpu, nid, old_nid;
 	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
-	struct sys_device *sysdev = NULL;
+	struct sys_device *sysdev;
 
 	for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
 		vphn_get_associativity(cpu, associativity);
@@ -1512,7 +1516,8 @@
 {
 	int rc = 0;
 
-	if (firmware_has_feature(FW_FEATURE_VPHN)) {
+	if (firmware_has_feature(FW_FEATURE_VPHN) &&
+	    get_lppaca()->shared_proc) {
 		vphn_enabled = 1;
 		setup_cpu_associativity_change_counters();
 		init_timer_deferrable(&topology_timer);
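
The vphn_unpack_associativity() change above reserves unpacked[0] for the domain count so the array matches the layout of a length-prefixed associativity property, and VPHN_ASSOC_BUFSIZE grows by one cell accordingly (6*8/4 + 1 = 13). The following stand-alone sketch models the same 16-bit field decoding in user space; the helper names and the sample input are invented, the padding handling is simplified (the kernel copies the reserved pattern through rather than stopping), and it is only meant to show the MSB/concatenation rule and the length cell, not to reproduce the hypervisor ABI.

#include <stdint.h>
#include <stdio.h>

#define FIELD_UNUSED	0xffff
#define FIELD_MSB	0x8000
#define FIELD_MASK	0x7fff
#define BUFSIZE		(6 * sizeof(uint64_t) / sizeof(uint32_t) + 1)	/* 13 */

/* Decode packed 16-bit fields into a length-prefixed 32-bit array. */
static int unpack_assoc(const uint16_t *field, uint32_t *unpacked)
{
	int i, doms = 0;

	for (i = 1; i < (int)BUFSIZE; i++) {
		if (*field == FIELD_UNUSED) {
			break;					/* rest is padding */
		} else if (*field & FIELD_MSB) {
			unpacked[i] = *field & FIELD_MASK;	/* 15-bit value */
			field++;
			doms++;
		} else {
			/* 15 low bits concatenated with the next 16-bit field */
			unpacked[i] = ((uint32_t)field[0] << 16) | field[1];
			field += 2;
			doms++;
		}
	}
	unpacked[0] = doms;		/* first cell holds the count */
	return doms;
}

int main(void)
{
	/* two domains: 0x8002 -> 2, then 0x0001,0x0005 -> 0x10005 */
	uint16_t packed[] = { 0x8002, 0x0001, 0x0005, 0xffff };
	uint32_t out[BUFSIZE] = { 0 };
	int n = unpack_assoc(packed, out), i;

	for (i = 0; i <= n; i++)
		printf("out[%d] = 0x%x\n", i, out[i]);
	return 0;
}
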
diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
index 661d354..d0c4e15 100644
--- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
@@ -57,12 +57,12 @@
 	ipic_set_default_priority();
 }
 
-struct const char *board[] __initdata = {
+static const char *board[] __initdata = {
 	"MPC8308RDB",
 	"fsl,mpc8308rdb",
 	"denx,mpc8308_p1m",
 	NULL
-}
+};
 
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index b54cd73..f859ead 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -60,11 +60,11 @@
 	ipic_set_default_priority();
 }
 
-struct const char *board[] __initdata = {
+static const char *board[] __initdata = {
 	"MPC8313ERDB",
 	"fsl,mpc8315erdb",
 	NULL
-}
+};
 
 /*
  * Called very early, MMU is off, device-tree isn't unflattened
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index 0fea881..82a4345 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -35,6 +35,8 @@
 
 /* system i/o configuration register high */
 #define MPC83XX_SICRH_OFFS         0x118
+#define MPC8308_SICRH_USB_MASK     0x000c0000
+#define MPC8308_SICRH_USB_ULPI     0x00040000
 #define MPC834X_SICRH_USB_UTMI     0x00020000
 #define MPC831X_SICRH_USB_MASK     0x000000e0
 #define MPC831X_SICRH_USB_ULPI     0x000000a0
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
index 3ba4bb7..2c64164 100644
--- a/arch/powerpc/platforms/83xx/usb.c
+++ b/arch/powerpc/platforms/83xx/usb.c
@@ -127,7 +127,8 @@
 
 	/* Configure clock */
 	immr_node = of_get_parent(np);
-	if (immr_node && of_device_is_compatible(immr_node, "fsl,mpc8315-immr"))
+	if (immr_node && (of_device_is_compatible(immr_node, "fsl,mpc8315-immr") ||
+			of_device_is_compatible(immr_node, "fsl,mpc8308-immr")))
 		clrsetbits_be32(immap + MPC83XX_SCCR_OFFS,
 		                MPC8315_SCCR_USB_MASK,
 		                MPC8315_SCCR_USB_DRCM_01);
@@ -138,7 +139,11 @@
 
 	/* Configure pin mux for ULPI.  There is no pin mux for UTMI */
 	if (prop && !strcmp(prop, "ulpi")) {
-		if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) {
+		if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) {
+			clrsetbits_be32(immap + MPC83XX_SICRH_OFFS,
+					MPC8308_SICRH_USB_MASK,
+					MPC8308_SICRH_USB_ULPI);
+		} else if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) {
 			clrsetbits_be32(immap + MPC83XX_SICRL_OFFS,
 					MPC8315_SICRL_USB_MASK,
 					MPC8315_SICRL_USB_ULPI);
@@ -173,6 +178,9 @@
 		     !strcmp(prop, "utmi"))) {
 		u32 refsel;
 
+		if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr"))
+			goto out;
+
 		if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr"))
 			refsel = CONTROL_REFSEL_24MHZ;
 		else
@@ -186,9 +194,11 @@
 		temp = CONTROL_PHY_CLK_SEL_ULPI;
 #ifdef CONFIG_USB_OTG
 		/* Set OTG_PORT */
-		dr_mode = of_get_property(np, "dr_mode", NULL);
-		if (dr_mode && !strcmp(dr_mode, "otg"))
-			temp |= CONTROL_OTG_PORT;
+		if (!of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) {
+			dr_mode = of_get_property(np, "dr_mode", NULL);
+			if (dr_mode && !strcmp(dr_mode, "otg"))
+				temp |= CONTROL_OTG_PORT;
+		}
 #endif /* CONFIG_USB_OTG */
 		out_be32(usb_regs + FSL_USB2_CONTROL_OFFS, temp);
 	} else {
@@ -196,6 +206,7 @@
 		ret = -EINVAL;
 	}
 
+out:
 	iounmap(usb_regs);
 	of_node_put(np);
 	return ret;
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index 968c1c0..d809836 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -39,8 +39,6 @@
 };
 static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info);
 
-static struct workqueue_struct *kspugov_wq;
-
 static int calc_freq(struct spu_gov_info_struct *info)
 {
 	int cpu;
@@ -71,14 +69,14 @@
 	__cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H);
 
 	delay = usecs_to_jiffies(info->poll_int);
-	queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
+	schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
 }
 
 static void spu_gov_init_work(struct spu_gov_info_struct *info)
 {
 	int delay = usecs_to_jiffies(info->poll_int);
 	INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work);
-	queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
+	schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
 }
 
 static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
@@ -152,27 +150,15 @@
 {
 	int ret;
 
-	kspugov_wq = create_workqueue("kspugov");
-	if (!kspugov_wq) {
-		printk(KERN_ERR "creation of kspugov failed\n");
-		ret = -EFAULT;
-		goto out;
-	}
-
 	ret = cpufreq_register_governor(&spu_governor);
-	if (ret) {
+	if (ret)
 		printk(KERN_ERR "registration of governor failed\n");
-		destroy_workqueue(kspugov_wq);
-		goto out;
-	}
-out:
 	return ret;
 }
 
 static void __exit spu_gov_exit(void)
 {
 	cpufreq_unregister_governor(&spu_governor);
-	destroy_workqueue(kspugov_wq);
 }
 
 
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c
index 1b57490..d31c594 100644
--- a/arch/powerpc/platforms/cell/qpace_setup.c
+++ b/arch/powerpc/platforms/cell/qpace_setup.c
@@ -145,9 +145,4 @@
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= qpace_progress,
 	.init_IRQ		= iic_init_IRQ,
-#ifdef CONFIG_KEXEC
-	.machine_kexec		= default_machine_kexec,
-	.machine_kexec_prepare	= default_machine_kexec_prepare,
-	.machine_crash_shutdown	= default_machine_crash_shutdown,
-#endif
 };
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 8547e86..acfacce 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -37,6 +37,7 @@
 #include <asm/spu_csa.h>
 #include <asm/xmon.h>
 #include <asm/prom.h>
+#include <asm/kexec.h>
 
 const struct spu_management_ops *spu_management_ops;
 EXPORT_SYMBOL_GPL(spu_management_ops);
@@ -727,6 +728,75 @@
 
 static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);
 
+#ifdef CONFIG_KEXEC
+
+struct crash_spu_info {
+	struct spu *spu;
+	u32 saved_spu_runcntl_RW;
+	u32 saved_spu_status_R;
+	u32 saved_spu_npc_RW;
+	u64 saved_mfc_sr1_RW;
+	u64 saved_mfc_dar;
+	u64 saved_mfc_dsisr;
+};
+
+#define CRASH_NUM_SPUS	16	/* Enough for current hardware */
+static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];
+
+static void crash_kexec_stop_spus(void)
+{
+	struct spu *spu;
+	int i;
+	u64 tmp;
+
+	for (i = 0; i < CRASH_NUM_SPUS; i++) {
+		if (!crash_spu_info[i].spu)
+			continue;
+
+		spu = crash_spu_info[i].spu;
+
+		crash_spu_info[i].saved_spu_runcntl_RW =
+			in_be32(&spu->problem->spu_runcntl_RW);
+		crash_spu_info[i].saved_spu_status_R =
+			in_be32(&spu->problem->spu_status_R);
+		crash_spu_info[i].saved_spu_npc_RW =
+			in_be32(&spu->problem->spu_npc_RW);
+
+		crash_spu_info[i].saved_mfc_dar    = spu_mfc_dar_get(spu);
+		crash_spu_info[i].saved_mfc_dsisr  = spu_mfc_dsisr_get(spu);
+		tmp = spu_mfc_sr1_get(spu);
+		crash_spu_info[i].saved_mfc_sr1_RW = tmp;
+
+		tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
+		spu_mfc_sr1_set(spu, tmp);
+
+		__delay(200);
+	}
+}
+
+static void crash_register_spus(struct list_head *list)
+{
+	struct spu *spu;
+	int ret;
+
+	list_for_each_entry(spu, list, full_list) {
+		if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
+			continue;
+
+		crash_spu_info[spu->number].spu = spu;
+	}
+
+	ret = crash_shutdown_register(&crash_kexec_stop_spus);
+	if (ret)
+		printk(KERN_ERR "Could not register SPU crash handler\n");
+}
+
+#else
+static inline void crash_register_spus(struct list_head *list)
+{
+}
+#endif
+
 static int __init init_spu_base(void)
 {
 	int i, ret = 0;
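
crash_register_spus() above records each SPU in a fixed-size table (skipping, with a warning, anything beyond CRASH_NUM_SPUS) and hooks crash_kexec_stop_spus() into the crash-shutdown path so SPU state is saved and the units are stopped before kdump. The sketch below is a stripped-down user-space model of that "register now, snapshot at crash time" pattern; the struct device stand-in and its fields are invented for illustration and are not the SPU MMIO accessors.

#include <stdio.h>

#define NUM_SLOTS 16

struct device {				/* stand-in for struct spu */
	int number;
	unsigned int runcntl;
};

struct crash_info {
	struct device *dev;
	unsigned int saved_runcntl;
};

static struct crash_info crash_info[NUM_SLOTS];

static void crash_register(struct device *dev)
{
	if (dev->number >= NUM_SLOTS) {
		fprintf(stderr, "device %d exceeds table, skipped\n",
			dev->number);
		return;
	}
	crash_info[dev->number].dev = dev;
}

/* Called from the crash path: snapshot and stop every registered device. */
static void crash_stop_all(void)
{
	int i;

	for (i = 0; i < NUM_SLOTS; i++) {
		if (!crash_info[i].dev)
			continue;
		crash_info[i].saved_runcntl = crash_info[i].dev->runcntl;
		crash_info[i].dev->runcntl = 0;		/* "stop" it */
		printf("slot %d: saved runcntl %u\n",
		       i, crash_info[i].saved_runcntl);
	}
}

int main(void)
{
	struct device d = { .number = 3, .runcntl = 1 };

	crash_register(&d);
	crash_stop_all();
	return 0;
}
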
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 02f7b11..3c7c3f8 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -219,24 +219,17 @@
 	loff_t pos = *ppos;
 	int ret;
 
-	if (pos < 0)
-		return -EINVAL;
 	if (pos > LS_SIZE)
 		return -EFBIG;
-	if (size > LS_SIZE - pos)
-		size = LS_SIZE - pos;
 
 	ret = spu_acquire(ctx);
 	if (ret)
 		return ret;
 
 	local_store = ctx->ops->get_ls(ctx);
-	ret = copy_from_user(local_store + pos, buffer, size);
+	size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
 	spu_release(ctx);
 
-	if (ret)
-		return -EFAULT;
-	*ppos = pos + size;
 	return size;
 }
 
@@ -574,18 +567,15 @@
 	if (*pos >= sizeof(lscsa->gprs))
 		return -EFBIG;
 
-	size = min_t(ssize_t, sizeof(lscsa->gprs) - *pos, size);
-	*pos += size;
-
 	ret = spu_acquire_saved(ctx);
 	if (ret)
 		return ret;
 
-	ret = copy_from_user((char *)lscsa->gprs + *pos - size,
-			     buffer, size) ? -EFAULT : size;
+	size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
+					buffer, size);
 
 	spu_release_saved(ctx);
-	return ret;
+	return size;
 }
 
 static const struct file_operations spufs_regs_fops = {
@@ -630,18 +620,15 @@
 	if (*pos >= sizeof(lscsa->fpcr))
 		return -EFBIG;
 
-	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
-
 	ret = spu_acquire_saved(ctx);
 	if (ret)
 		return ret;
 
-	*pos += size;
-	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
-			     buffer, size) ? -EFAULT : size;
+	size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
+					buffer, size);
 
 	spu_release_saved(ctx);
-	return ret;
+	return size;
 }
 
 static const struct file_operations spufs_fpcr_fops = {
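
The three write handlers above drop their open-coded offset and size checks in favour of simple_write_to_buffer(), which clamps the copy to the destination buffer and advances *ppos itself. A rough user-space model of that clamping follows; the exact edge cases (negative offset, zero count) are assumptions of the model, and the copy_from_user()/-EFAULT side of the real helper is deliberately omitted.

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/*
 * Rough model of simple_write_to_buffer(): copy at most "count" bytes
 * into "to" at offset *ppos, never past "available" bytes, and advance
 * *ppos by however much was actually stored.
 */
static ssize_t write_to_buffer(void *to, size_t available, long long *ppos,
			       const void *from, size_t count)
{
	long long pos = *ppos;

	if (pos < 0)
		return -1;				/* invalid offset */
	if ((size_t)pos >= available || count == 0)
		return 0;
	if (count > available - (size_t)pos)
		count = available - (size_t)pos;

	memcpy((char *)to + pos, from, count);
	*ppos = pos + (long long)count;
	return (ssize_t)count;
}

int main(void)
{
	char regs[16] = { 0 };
	long long pos = 12;

	/* Only 4 of the 8 bytes fit; the write is truncated, not rejected. */
	ssize_t n = write_to_buffer(regs, sizeof(regs), &pos, "ABCDEFGH", 8);

	printf("wrote %zd bytes, pos is now %lld\n", n, pos);
	return 0;
}
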
diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c
index 1106fd9..a138e14 100644
--- a/arch/powerpc/platforms/embedded6xx/gamecube.c
+++ b/arch/powerpc/platforms/embedded6xx/gamecube.c
@@ -75,14 +75,6 @@
 	flipper_quiesce();
 }
 
-#ifdef CONFIG_KEXEC
-static int gamecube_kexec_prepare(struct kimage *image)
-{
-	return 0;
-}
-#endif /* CONFIG_KEXEC */
-
-
 define_machine(gamecube) {
 	.name			= "gamecube",
 	.probe			= gamecube_probe,
@@ -95,9 +87,6 @@
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 	.machine_shutdown	= gamecube_shutdown,
-#ifdef CONFIG_KEXEC
-	.machine_kexec_prepare	= gamecube_kexec_prepare,
-#endif
 };
 
 
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 649473a..1b5dc1a 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -18,7 +18,6 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/seq_file.h>
-#include <linux/kexec.h>
 #include <linux/of_platform.h>
 #include <linux/memblock.h>
 #include <mm/mmu_decl.h>
@@ -226,13 +225,6 @@
 	flipper_quiesce();
 }
 
-#ifdef CONFIG_KEXEC
-static int wii_machine_kexec_prepare(struct kimage *image)
-{
-	return 0;
-}
-#endif /* CONFIG_KEXEC */
-
 define_machine(wii) {
 	.name			= "wii",
 	.probe			= wii_probe,
@@ -246,9 +238,6 @@
 	.calibrate_decr		= generic_calibrate_decr,
 	.progress		= udbg_progress,
 	.machine_shutdown	= wii_shutdown,
-#ifdef CONFIG_KEXEC
-	.machine_kexec_prepare	= wii_machine_kexec_prepare,
-#endif
 };
 
 static struct of_device_id wii_of_bus[] = {
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
index 47a20cf..e5bc9f7 100644
--- a/arch/powerpc/platforms/iseries/Kconfig
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -2,7 +2,7 @@
 	bool "IBM Legacy iSeries"
 	depends on PPC64 && PPC_BOOK3S
 	select PPC_INDIRECT_IO
-	select PPC_PCI_CHOICE if EMBEDDED
+	select PPC_PCI_CHOICE if EXPERT
 
 menu "iSeries device drivers"
 	depends on PPC_ISERIES
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 5d1b743..5b3da4b 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -10,7 +10,7 @@
 	select RTAS_ERROR_LOGGING
 	select PPC_UDBG_16550
 	select PPC_NATIVE
-	select PPC_PCI_CHOICE if EMBEDDED
+	select PPC_PCI_CHOICE if EXPERT
 	default y
 
 config PPC_SPLPAR
@@ -24,9 +24,9 @@
 	  two or more partitions.
 
 config EEH
-	bool "PCI Extended Error Handling (EEH)" if EMBEDDED
+	bool "PCI Extended Error Handling (EEH)" if EXPERT
 	depends on PPC_PSERIES && PCI
-	default y if !EMBEDDED
+	default y if !EXPERT
 
 config PSERIES_MSI
        bool
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 53cbd53..77d38a5 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -61,13 +61,3 @@
 {
 	ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics;
 }
-
-static int __init pseries_kexec_setup(void)
-{
-	ppc_md.machine_kexec = default_machine_kexec;
-	ppc_md.machine_kexec_prepare = default_machine_kexec_prepare;
-	ppc_md.machine_crash_shutdown = default_machine_crash_shutdown;
-
-	return 0;
-}
-machine_device_initcall(pseries, pseries_kexec_setup);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 5d3ea9f..ca5d589 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -713,6 +713,13 @@
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
 extern long hcall_tracepoint_refcount;
 
+/*
+ * Since the tracing code might execute hcalls we need to guard against
+ * recursion. One example of this is spinlocks calling H_YIELD on
+ * shared processor partitions.
+ */
+static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
+
 void hcall_tracepoint_regfunc(void)
 {
 	hcall_tracepoint_refcount++;
@@ -725,12 +732,42 @@
 
 void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
 {
+	unsigned long flags;
+	unsigned int *depth;
+
+	local_irq_save(flags);
+
+	depth = &__get_cpu_var(hcall_trace_depth);
+
+	if (*depth)
+		goto out;
+
+	(*depth)++;
 	trace_hcall_entry(opcode, args);
+	(*depth)--;
+
+out:
+	local_irq_restore(flags);
 }
 
 void __trace_hcall_exit(long opcode, unsigned long retval,
 			unsigned long *retbuf)
 {
+	unsigned long flags;
+	unsigned int *depth;
+
+	local_irq_save(flags);
+
+	depth = &__get_cpu_var(hcall_trace_depth);
+
+	if (*depth)
+		goto out;
+
+	(*depth)++;
 	trace_hcall_exit(opcode, retval, retbuf);
+	(*depth)--;
+
+out:
+	local_irq_restore(flags);
 }
 #endif
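
The new per-CPU hcall_trace_depth counter keeps trace_hcall_entry()/trace_hcall_exit() from re-entering themselves when the tracing machinery itself issues hcalls. A single-threaded sketch of the same depth-guard idea is below; a plain static counter stands in for the per-CPU variable and the interrupts-disabled section, and the function names are illustrative only.

#include <stdio.h>

static unsigned int trace_depth;

static void do_trace(const char *what);

/* Something the tracer calls that would itself be traced. */
static void traced_helper(void)
{
	do_trace("helper");		/* recursive entry, must be dropped */
}

static void do_trace(const char *what)
{
	if (trace_depth)		/* already tracing: bail out */
		return;

	trace_depth++;
	printf("trace: %s\n", what);
	traced_helper();		/* recursion is silently ignored */
	trace_depth--;
}

int main(void)
{
	do_trace("hcall entry");	/* prints exactly once */
	return 0;
}
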
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index a4fc6da..c55d7ad 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -54,7 +54,8 @@
 static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
 static DEFINE_SPINLOCK(ras_log_buf_lock);
 
-static char mce_data_buf[RTAS_ERROR_LOG_MAX];
+static char global_mce_data_buf[RTAS_ERROR_LOG_MAX];
+static DEFINE_PER_CPU(__u64, mce_data_buf);
 
 static int ras_get_sensor_state_token;
 static int ras_check_exception_token;
@@ -196,12 +197,24 @@
 	return IRQ_HANDLED;
 }
 
-/* Get the error information for errors coming through the
+/*
+ * Some versions of FWNMI place the buffer inside the 4kB page starting at
+ * 0x7000. Other versions place it inside the rtas buffer. We check both.
+ */
+#define VALID_FWNMI_BUFFER(A) \
+	((((A) >= 0x7000) && ((A) < 0x7ff0)) || \
+	(((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16))))
+
+/*
+ * Get the error information for errors coming through the
  * FWNMI vectors.  The pt_regs' r3 will be updated to reflect
  * the actual r3 if possible, and a ptr to the error log entry
  * will be returned if found.
  *
- * The mce_data_buf does not have any locks or protection around it,
+ * If the RTAS error is not of the extended type, then we put it in a per
+ * cpu 64bit buffer. If it is the extended type we use global_mce_data_buf.
+ *
+ * The global_mce_data_buf does not have any locks or protection around it,
  * if a second machine check comes in, or a system reset is done
  * before we have logged the error, then we will get corruption in the
  * error log.  This is preferable over holding off on calling
@@ -210,20 +223,31 @@
  */
 static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
 {
-	unsigned long errdata = regs->gpr[3];
-	struct rtas_error_log *errhdr = NULL;
 	unsigned long *savep;
+	struct rtas_error_log *h, *errhdr = NULL;
 
-	if ((errdata >= 0x7000 && errdata < 0x7fff0) ||
-	    (errdata >= rtas.base && errdata < rtas.base + rtas.size - 16)) {
-		savep = __va(errdata);
-		regs->gpr[3] = savep[0];	/* restore original r3 */
-		memset(mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
-		memcpy(mce_data_buf, (char *)(savep + 1), RTAS_ERROR_LOG_MAX);
-		errhdr = (struct rtas_error_log *)mce_data_buf;
-	} else {
-		printk("FWNMI: corrupt r3\n");
+	if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
+		printk(KERN_ERR "FWNMI: corrupt r3\n");
+		return NULL;
 	}
+
+	savep = __va(regs->gpr[3]);
+	regs->gpr[3] = savep[0];	/* restore original r3 */
+
+	/* If it isn't an extended log we can use the per cpu 64bit buffer */
+	h = (struct rtas_error_log *)&savep[1];
+	if (!h->extended) {
+		memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64));
+		errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf);
+	} else {
+		int len;
+
+		len = min_t(int, 8+h->extended_log_length, RTAS_ERROR_LOG_MAX);
+		memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
+		memcpy(global_mce_data_buf, h, len);
+		errhdr = (struct rtas_error_log *)global_mce_data_buf;
+	}
+
 	return errhdr;
 }
 
@@ -235,7 +259,7 @@
 {
 	int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL);
 	if (ret != 0)
-		printk("FWNMI: nmi-interlock failed: %d\n", ret);
+		printk(KERN_ERR "FWNMI: nmi-interlock failed: %d\n", ret);
 }
 
 int pSeries_system_reset_exception(struct pt_regs *regs)
@@ -259,31 +283,43 @@
  * Return 1 if corrected (or delivered a signal).
  * Return 0 if there is nothing we can do.
  */
-static int recover_mce(struct pt_regs *regs, struct rtas_error_log * err)
+static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err)
 {
-	int nonfatal = 0;
+	int recovered = 0;
 
-	if (err->disposition == RTAS_DISP_FULLY_RECOVERED) {
+	if (!(regs->msr & MSR_RI)) {
+		/* If MSR_RI isn't set, we cannot recover */
+		recovered = 0;
+
+	} else if (err->disposition == RTAS_DISP_FULLY_RECOVERED) {
 		/* Platform corrected itself */
-		nonfatal = 1;
-	} else if ((regs->msr & MSR_RI) &&
-		   user_mode(regs) &&
-		   err->severity == RTAS_SEVERITY_ERROR_SYNC &&
-		   err->disposition == RTAS_DISP_NOT_RECOVERED &&
-		   err->target == RTAS_TARGET_MEMORY &&
-		   err->type == RTAS_TYPE_ECC_UNCORR &&
-		   !(current->pid == 0 || is_global_init(current))) {
-		/* Kill off a user process with an ECC error */
-		printk(KERN_ERR "MCE: uncorrectable ecc error for pid %d\n",
-		       current->pid);
-		/* XXX something better for ECC error? */
-		_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
-		nonfatal = 1;
+		recovered = 1;
+
+	} else if (err->disposition == RTAS_DISP_LIMITED_RECOVERY) {
+		/* Platform corrected itself but could be degraded */
+		printk(KERN_ERR "MCE: limited recovery, system may "
+		       "be degraded\n");
+		recovered = 1;
+
+	} else if (user_mode(regs) && !is_global_init(current) &&
+		   err->severity == RTAS_SEVERITY_ERROR_SYNC) {
+
+		/*
+		 * If we received a synchronous error when in userspace
+		 * kill the task. Firmware may report details of the failure
+		 * asynchronously, so we can't rely on the target and type
+		 * fields being valid here.
+		 */
+		printk(KERN_ERR "MCE: uncorrectable error, killing task "
+		       "%s:%d\n", current->comm, current->pid);
+
+		_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
+		recovered = 1;
 	}
 
-	log_error((char *)err, ERR_TYPE_RTAS_LOG, !nonfatal);
+	log_error((char *)err, ERR_TYPE_RTAS_LOG, 0);
 
-	return nonfatal;
+	return recovered;
 }
 
 /*
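
VALID_FWNMI_BUFFER() above accepts r3 either inside the 4kB save area starting at 0x7000 or inside the RTAS region, and fwnmi_get_errinfo() then picks the per-CPU 8-byte buffer for short logs or global_mce_data_buf for extended ones. The small stand-alone check below exercises the same range test; the rtas base and size values are made up purely for illustration.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the firmware-provided RTAS region. */
static const uint64_t rtas_base = 0x1f000000;
static const uint64_t rtas_size = 0x00020000;

static int valid_fwnmi_buffer(uint64_t a)
{
	return (a >= 0x7000 && a < 0x7ff0) ||
	       (a >= rtas_base && a < rtas_base + rtas_size - 16);
}

int main(void)
{
	uint64_t samples[] = { 0x7000, 0x7ff0, 0x1f000010, 0x2000 };
	int i;

	for (i = 0; i < 4; i++)
		printf("r3=0x%llx -> %s\n",
		       (unsigned long long)samples[i],
		       valid_fwnmi_buffer(samples[i]) ? "valid" : "corrupt");
	return 0;
}
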
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 9f99bef..8c6cab0 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -1555,8 +1555,6 @@
 	saved_mcheck_exception = ppc_md.machine_check_exception;
 	ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
 #endif
-	/* Ensure that RFXE is set */
-	mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000));
 
 	return 0;
 err:
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 7c13426..b0c8469 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -674,7 +674,8 @@
 	/* make sure mask gets to controller before we return to user */
 	do {
 		if (!loops--) {
-			printk(KERN_ERR "mpic_enable_irq timeout\n");
+			printk(KERN_ERR "%s: timeout on hwirq %u\n",
+			       __func__, src);
 			break;
 		}
 	} while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK);
@@ -695,7 +696,8 @@
 	/* make sure mask gets to controller before we return to user */
 	do {
 		if (!loops--) {
-			printk(KERN_ERR "mpic_enable_irq timeout\n");
+			printk(KERN_ERR "%s: timeout on hwirq %u\n",
+			       __func__, src);
 			break;
 		}
 	} while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK));
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index ff19efdf..636bcb8 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -406,7 +406,7 @@
 	  If unsure, say Y.
 
 config CHSC_SCH
-	def_tristate y
+	def_tristate m
 	prompt "Support for CHSC subchannels"
 	help
 	  This driver allows usage of CHSC subchannels. A CHSC subchannel
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index f42dbab..48884f8 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -38,6 +38,7 @@
 		BUG_ON(ret != bsize);
 		data += bsize - index;
 		len -= bsize - index;
+		index = 0;
 	}
 
 	/* process as many blocks as possible */
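
The one-line "index = 0" fix matters because, once the partially filled buffer has been flushed, the remaining full blocks are consumed straight from "data" and the leftover tail must be stored at the start of the buffer again; with a stale index the tail lands at the wrong offset. The user-space sketch below mirrors that buffering pattern with a fake 8-byte block size and a fake compress step; it is a model of the control flow, not the s390 CPACF code.

#include <stdio.h>
#include <string.h>

#define BSIZE 8

static unsigned char buf[BSIZE];
static size_t count;			/* total bytes seen so far */

static void compress_block(const unsigned char *block)
{
	printf("block: %.8s\n", (const char *)block);
}

static void update(const unsigned char *data, size_t len)
{
	size_t index = count % BSIZE;	/* bytes already buffered */

	count += len;

	/* Complete a previously started block first. */
	if (index && index + len >= BSIZE) {
		memcpy(buf + index, data, BSIZE - index);
		compress_block(buf);
		data += BSIZE - index;
		len -= BSIZE - index;
		index = 0;		/* the fix: buffer is empty again */
	}

	/* Process as many whole blocks as possible straight from data. */
	while (len >= BSIZE) {
		compress_block(data);
		data += BSIZE;
		len -= BSIZE;
	}

	/* Stash the tail; without the reset this would land at buf+index. */
	if (len)
		memcpy(buf + index, data, len);
}

int main(void)
{
	update((const unsigned char *)"abc", 3);
	update((const unsigned char *)"defghijklm", 10);
	return 0;
}
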
diff --git a/arch/s390/include/asm/cacheflush.h b/arch/s390/include/asm/cacheflush.h
index 405cc97..7e1f776 100644
--- a/arch/s390/include/asm/cacheflush.h
+++ b/arch/s390/include/asm/cacheflush.h
@@ -1,29 +1,8 @@
 #ifndef _S390_CACHEFLUSH_H
 #define _S390_CACHEFLUSH_H
 
-/* Keep includes the same across arches.  */
-#include <linux/mm.h>
-
 /* Caches aren't brain-dead on the s390. */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-#define flush_cache_vmap(start, end)		do { } while (0)
-#define flush_cache_vunmap(start, end)		do { } while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
+#include <asm-generic/cacheflush.h>
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void kernel_map_pages(struct page *page, int numpages, int enable);
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index bf3de04..2c79b64 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -148,11 +148,6 @@
  */
 extern unsigned long thread_saved_pc(struct task_struct *t);
 
-/*
- * Print register of task into buffer. Used in fs/proc/array.c.
- */
-extern void task_show_regs(struct seq_file *m, struct task_struct *task);
-
 extern void show_code(struct pt_regs *regs);
 
 unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index f1f644f..9074a54 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -22,6 +22,7 @@
  */
 
 #include <linux/mm.h>
+#include <linux/pagemap.h>
 #include <linux/swap.h>
 #include <asm/processor.h>
 #include <asm/pgalloc.h>
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 5eb78dd..b5a4a73 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -237,43 +237,6 @@
 	show_last_breaking_event(regs);
 }
 
-/* This is called from fs/proc/array.c */
-void task_show_regs(struct seq_file *m, struct task_struct *task)
-{
-	struct pt_regs *regs;
-
-	regs = task_pt_regs(task);
-	seq_printf(m, "task: %p, ksp: %p\n",
-		       task, (void *)task->thread.ksp);
-	seq_printf(m, "User PSW : %p %p\n",
-		       (void *) regs->psw.mask, (void *)regs->psw.addr);
-
-	seq_printf(m, "User GPRS: " FOURLONG,
-			  regs->gprs[0], regs->gprs[1],
-			  regs->gprs[2], regs->gprs[3]);
-	seq_printf(m, "           " FOURLONG,
-			  regs->gprs[4], regs->gprs[5],
-			  regs->gprs[6], regs->gprs[7]);
-	seq_printf(m, "           " FOURLONG,
-			  regs->gprs[8], regs->gprs[9],
-			  regs->gprs[10], regs->gprs[11]);
-	seq_printf(m, "           " FOURLONG,
-			  regs->gprs[12], regs->gprs[13],
-			  regs->gprs[14], regs->gprs[15]);
-	seq_printf(m, "User ACRS: %08x %08x %08x %08x\n",
-			  task->thread.acrs[0], task->thread.acrs[1],
-			  task->thread.acrs[2], task->thread.acrs[3]);
-	seq_printf(m, "           %08x %08x %08x %08x\n",
-			  task->thread.acrs[4], task->thread.acrs[5],
-			  task->thread.acrs[6], task->thread.acrs[7]);
-	seq_printf(m, "           %08x %08x %08x %08x\n",
-			  task->thread.acrs[8], task->thread.acrs[9],
-			  task->thread.acrs[10], task->thread.acrs[11]);
-	seq_printf(m, "           %08x %08x %08x %08x\n",
-			  task->thread.acrs[12], task->thread.acrs[13],
-			  task->thread.acrs[14], task->thread.acrs[15]);
-}
-
 static DEFINE_SPINLOCK(die_lock);
 
 void die(const char * str, struct pt_regs * regs, long err)
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index 07deaee..a6c4f7e 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -125,9 +125,9 @@
 	unsigned long tmp1;
 
 	asm volatile(
+		"   sacf  256\n"
 		"  "AHI"  %0,-1\n"
 		"   jo    5f\n"
-		"   sacf  256\n"
 		"   bras  %3,3f\n"
 		"0:"AHI"  %0,257\n"
 		"1: mvc   0(1,%1),0(%2)\n"
@@ -142,9 +142,8 @@
 		"3:"AHI"  %0,-256\n"
 		"   jnm   2b\n"
 		"4: ex    %0,1b-0b(%3)\n"
-		"   sacf  0\n"
 		"5: "SLR"  %0,%0\n"
-		"6:\n"
+		"6: sacf  0\n"
 		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
 		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
 		: : "cc", "memory");
@@ -156,9 +155,9 @@
 	unsigned long tmp1, tmp2;
 
 	asm volatile(
+		"   sacf  256\n"
 		"  "AHI"  %0,-1\n"
 		"   jo    5f\n"
-		"   sacf  256\n"
 		"   bras  %3,3f\n"
 		"   xc    0(1,%1),0(%1)\n"
 		"0:"AHI"  %0,257\n"
@@ -178,9 +177,8 @@
 		"3:"AHI"  %0,-256\n"
 		"   jnm   2b\n"
 		"4: ex    %0,0(%3)\n"
-		"   sacf  0\n"
 		"5: "SLR"  %0,%0\n"
-		"6:\n"
+		"6: sacf  0\n"
 		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
 		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
 		: : "cc", "memory");
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 0c719c6..e1850c2 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -336,7 +336,8 @@
 	page->flags ^= bits;
 	if (page->flags & FRAG_MASK) {
 		/* Page now has some free pgtable fragments. */
-		list_move(&page->lru, &mm->context.pgtable_list);
+		if (!list_empty(&page->lru))
+			list_move(&page->lru, &mm->context.pgtable_list);
 		page = NULL;
 	} else
 		/* All fragments of the 4K page have been freed. */
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index 4293fdcb..27b2295 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -1,5 +1,9 @@
 menu "Machine selection"
 
+config SCORE
+       def_bool y
+       select HAVE_GENERIC_HARDIRQS
+
 choice
 	prompt "System type"
 	default MACH_SPCT6600
@@ -53,9 +57,6 @@
 config SCHED_NO_NO_OMIT_FRAME_POINTER
 	def_bool y
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 config GENERIC_SYSCALL_TABLE
 	def_bool y
 
@@ -68,9 +69,6 @@
 config 32BIT
 	def_bool y
 
-config GENERIC_HARDIRQS
-	def_bool y
-
 config ARCH_FLATMEM_ENABLE
 	def_bool y
 
diff --git a/arch/score/configs/spct6600_defconfig b/arch/score/configs/spct6600_defconfig
index 9883c50..df1edbf 100644
--- a/arch/score/configs/spct6600_defconfig
+++ b/arch/score/configs/spct6600_defconfig
@@ -9,7 +9,7 @@
 CONFIG_SYSFS_DEPRECATED_V2=y
 CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_KALLSYMS is not set
 # CONFIG_HOTPLUG is not set
 CONFIG_SLAB=y
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index fff2522..8a9011d 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -1,6 +1,6 @@
 config SUPERH
 	def_bool y
-	select EMBEDDED
+	select EXPERT
 	select CLKDEV_LOOKUP
 	select HAVE_IDE if HAS_IOPORT
 	select HAVE_MEMBLOCK
@@ -15,6 +15,7 @@
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA
+	select HAVE_KERNEL_XZ
 	select HAVE_KERNEL_LZO
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_REGS_AND_STACK_ACCESS_API
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 9c8c6e1..e3d8170 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -200,7 +200,7 @@
 libs-$(CONFIG_SUPERH32)		:= arch/sh/lib/	$(libs-y)
 libs-$(CONFIG_SUPERH64)		:= arch/sh/lib64/ $(libs-y)
 
-BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.lzo \
+BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.xz uImage.lzo \
 	       uImage.srec uImage.bin zImage vmlinux.bin vmlinux.srec \
 	       romImage
 PHONY += $(BOOT_TARGETS)
@@ -230,5 +230,6 @@
 	@echo '* uImage.gz	           - Kernel-only image for U-Boot (gzip)'
 	@echo '  uImage.bz2	           - Kernel-only image for U-Boot (bzip2)'
 	@echo '  uImage.lzma	           - Kernel-only image for U-Boot (lzma)'
+	@echo '  uImage.xz	           - Kernel-only image for U-Boot (xz)'
 	@echo '  uImage.lzo	           - Kernel-only image for U-Boot (lzo)'
 endef
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 33b6629..701667a 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -1294,6 +1294,7 @@
 	i2c_register_board_info(1, i2c1_devices,
 				ARRAY_SIZE(i2c1_devices));
 
+#if defined(CONFIG_VIDEO_SH_VOU) || defined(CONFIG_VIDEO_SH_VOU_MODULE)
 	/* VOU */
 	gpio_request(GPIO_FN_DV_D15, NULL);
 	gpio_request(GPIO_FN_DV_D14, NULL);
@@ -1325,6 +1326,7 @@
 
 	/* Remove reset */
 	gpio_set_value(GPIO_PTG4, 1);
+#endif
 
 	return platform_add_devices(ecovec_devices,
 				    ARRAY_SIZE(ecovec_devices));
diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index 1ce6362..ba515d8 100644
--- a/arch/sh/boot/Makefile
+++ b/arch/sh/boot/Makefile
@@ -24,12 +24,13 @@
 suffix-$(CONFIG_KERNEL_GZIP)	:= gz
 suffix-$(CONFIG_KERNEL_BZIP2)	:= bz2
 suffix-$(CONFIG_KERNEL_LZMA)	:= lzma
+suffix-$(CONFIG_KERNEL_XZ)	:= xz
 suffix-$(CONFIG_KERNEL_LZO)	:= lzo
 
 targets := zImage vmlinux.srec romImage uImage uImage.srec uImage.gz \
-	   uImage.bz2 uImage.lzma uImage.lzo uImage.bin
+	   uImage.bz2 uImage.lzma uImage.xz uImage.lzo uImage.bin
 extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
-	   vmlinux.bin.lzo
+	   vmlinux.bin.xz vmlinux.bin.lzo
 subdir- := compressed romimage
 
 $(obj)/zImage: $(obj)/compressed/vmlinux FORCE
@@ -76,6 +77,9 @@
 $(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,lzma)
 
+$(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,xzkern)
+
 $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,lzo)
 
@@ -88,6 +92,9 @@
 $(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma
 	$(call if_changed,uimage,lzma)
 
+$(obj)/uImage.xz: $(obj)/vmlinux.bin.xz
+	$(call if_changed,uimage,xz)
+
 $(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo
 	$(call if_changed,uimage,lzo)
 
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index cfa5a08..e0b0293 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -6,7 +6,7 @@
 
 targets		:= vmlinux vmlinux.bin vmlinux.bin.gz \
 		   vmlinux.bin.bz2 vmlinux.bin.lzma \
-		   vmlinux.bin.lzo \
+		   vmlinux.bin.xz vmlinux.bin.lzo \
 		   head_$(BITS).o misc.o piggy.o
 
 OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/cache.o
@@ -50,6 +50,8 @@
 	$(call if_changed,bzip2)
 $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,lzma)
+$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
+	$(call if_changed,xzkern)
 $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,lzo)
 
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index 27140a6..95470a4 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -61,6 +61,10 @@
 #include "../../../../lib/decompress_unlzma.c"
 #endif
 
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
 #ifdef CONFIG_KERNEL_LZO
 #include "../../../../lib/decompress_unlzo.c"
 #endif
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 083ea06..db85916 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -134,6 +134,7 @@
 extern void pgtable_cache_init(void);
 
 struct vm_area_struct;
+struct mm_struct;
 
 extern void __update_cache(struct vm_area_struct *vma,
 			   unsigned long address, pte_t pte);
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index c2b0aaa..672944f 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -230,10 +230,10 @@
 static int __init sh7750_devices_setup(void)
 {
 	if (mach_is_rts7751r2d()) {
-		platform_register_device(&scif_device);
+		platform_device_register(&scif_device);
 	} else {
-		platform_register_device(&sci_device);
-		platform_register_device(&scif_device);
+		platform_device_register(&sci_device);
+		platform_device_register(&scif_device);
 	}
 
 	return platform_add_devices(sh7750_devices,
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c
index 948fdb6..38e8628 100644
--- a/arch/sh/kernel/topology.c
+++ b/arch/sh/kernel/topology.c
@@ -17,6 +17,7 @@
 static DEFINE_PER_CPU(struct cpu, cpu_devices);
 
 cpumask_t cpu_core_map[NR_CPUS];
+EXPORT_SYMBOL(cpu_core_map);
 
 static cpumask_t cpu_coregroup_map(unsigned int cpu)
 {
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 45d9c87..95695e9 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -50,6 +50,7 @@
 	select RTC_DRV_STARFIRE
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
+	select HAVE_GENERIC_HARDIRQS
 
 config ARCH_DEFCONFIG
 	string
@@ -107,10 +108,6 @@
 config NEED_PER_CPU_PAGE_FIRST_CHUNK
 	def_bool y if SPARC64
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	bool
-	def_bool y if SPARC64
-
 config MMU
 	bool
 	default y
@@ -276,10 +273,6 @@
 	  can be controlled through /sys/devices/system/cpu/cpu#.
 	  Say N if you want to disable CPU hotplug.
 
-config GENERIC_HARDIRQS
-	bool
-	default y if SPARC64
-
 source "kernel/time/Kconfig"
 
 if SPARC64
diff --git a/arch/sparc/include/asm/pcr.h b/arch/sparc/include/asm/pcr.h
index a2f5c61..843e4fa 100644
--- a/arch/sparc/include/asm/pcr.h
+++ b/arch/sparc/include/asm/pcr.h
@@ -43,4 +43,6 @@
 
 extern u64 pcr_enable;
 
+extern int pcr_arch_init(void);
+
 #endif /* __PCR_H */
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 47977a7..72509d0 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -255,10 +255,9 @@
 static int iommu_alloc_ctx(struct iommu *iommu)
 {
 	int lowest = iommu->ctx_lowest_free;
-	int sz = IOMMU_NUM_CTXS - lowest;
-	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
+	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
 
-	if (unlikely(n == sz)) {
+	if (unlikely(n == IOMMU_NUM_CTXS)) {
 		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
 		if (unlikely(n == lowest)) {
 			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
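
The iommu_alloc_ctx() change works because the second argument of find_next_zero_bit() is the total size of the bitmap in bits, not the number of bits remaining after the offset; with the old "IOMMU_NUM_CTXS - lowest" value the scan stopped early and free contexts near the top of the map were never found. The toy below (a byte-per-bit map and a hand-rolled search, not the kernel bitmap API) shows the difference.

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/*
 * Toy find_next_zero_bit(): "size" is the total number of bits in the
 * map, the search starts at "offset", and "size" is returned when no
 * zero bit is found.
 */
static size_t find_next_zero_bit(const bool *map, size_t size, size_t offset)
{
	size_t i;

	for (i = offset; i < size; i++)
		if (!map[i])
			return i;
	return size;
}

#define NUM_CTXS 8

int main(void)
{
	bool ctx_bitmap[NUM_CTXS] = { 1, 1, 1, 1, 1, 1, 0, 1 };
	size_t lowest = 5;

	/* Correct: search the whole map, the free context at bit 6 is found. */
	printf("full size  -> %zu\n",
	       find_next_zero_bit(ctx_bitmap, NUM_CTXS, lowest));

	/* Old arithmetic: size = NUM_CTXS - lowest ends the scan at bit 3. */
	printf("short size -> %zu\n",
	       find_next_zero_bit(ctx_bitmap, NUM_CTXS - lowest, lowest));
	return 0;
}
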
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index ae96cf5..7c2ced6 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -167,5 +167,3 @@
 	unregister_perf_hsvc();
 	return err;
 }
-
-early_initcall(pcr_arch_init);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b6a2b8f..555a76d 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -49,6 +49,7 @@
 #include <asm/mdesc.h>
 #include <asm/ldc.h>
 #include <asm/hypervisor.h>
+#include <asm/pcr.h>
 
 #include "cpumap.h"
 
@@ -1358,6 +1359,7 @@
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
+	pcr_arch_init();
 }
 
 void smp_send_reschedule(int cpu)
diff --git a/arch/sparc/kernel/una_asm_32.S b/arch/sparc/kernel/una_asm_32.S
index 8cc0345..8f096e8 100644
--- a/arch/sparc/kernel/una_asm_32.S
+++ b/arch/sparc/kernel/una_asm_32.S
@@ -24,9 +24,9 @@
 	.globl	__do_int_store
 __do_int_store:
 	ld	[%o2], %g1
-	cmp	%1, 2
+	cmp	%o1, 2
 	be	2f
-	 cmp	%1, 4
+	 cmp	%o1, 4
 	be	1f
 	 srl	%g1, 24, %g2
 	srl	%g1, 16, %g7
diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c
index 764b3eb..48d00e7 100644
--- a/arch/sparc/lib/bitext.c
+++ b/arch/sparc/lib/bitext.c
@@ -10,7 +10,7 @@
  */
 
 #include <linux/string.h>
-#include <linux/bitops.h>
+#include <linux/bitmap.h>
 
 #include <asm/bitext.h>
 
@@ -80,8 +80,7 @@
 		while (test_bit(offset + i, t->map) == 0) {
 			i++;
 			if (i == len) {
-				for (i = 0; i < len; i++)
-					__set_bit(offset + i, t->map);
+				bitmap_set(t->map, offset, len);
 				if (offset == t->first_free)
 					t->first_free = find_next_zero_bit
 							(t->map, t->size,
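
The bitext change swaps a per-bit __set_bit() loop for a single bitmap_set(map, offset, len) call, which sets a contiguous run of bits. A quick user-space equivalent over an array of unsigned long words is sketched below; the helper name and the open-coded bit arithmetic are illustrative, not the kernel implementation.

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Set every bit in [start, start + len), like bitmap_set(). */
static void set_bit_run(unsigned long *map, unsigned int start, unsigned int len)
{
	unsigned int i;

	for (i = start; i < start + len; i++)
		map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };

	set_bit_run(map, 60, 8);	/* crosses the word boundary on 64-bit */
	printf("map[0]=%#lx map[1]=%#lx\n", map[0], map[1]);
	return 0;
}
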
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index e11b5fc..08948e4 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -1,25 +1,34 @@
 # For a description of the syntax of this configuration file,
 # see Documentation/kbuild/config-language.txt.
 
+config TILE
+	def_bool y
+	select HAVE_KVM if !TILEGX
+	select GENERIC_FIND_FIRST_BIT
+	select GENERIC_FIND_NEXT_BIT
+	select USE_GENERIC_SMP_HELPERS
+	select CC_OPTIMIZE_FOR_SIZE
+	select HAVE_GENERIC_HARDIRQS
+	select GENERIC_IRQ_PROBE
+	select GENERIC_PENDING_IRQ if SMP
+
+# FIXME: investigate whether we need/want these options.
+#	select HAVE_IOREMAP_PROT
+#       select HAVE_OPTPROBES
+#       select HAVE_REGS_AND_STACK_ACCESS_API
+#       select HAVE_HW_BREAKPOINT
+#       select PERF_EVENTS
+#       select HAVE_USER_RETURN_NOTIFIER
+#       config NO_BOOTMEM
+#       config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+#       config HUGETLB_PAGE_SIZE_VARIABLE
+
 config MMU
 	def_bool y
 
 config GENERIC_CSUM
 	def_bool y
 
-config GENERIC_HARDIRQS
-	def_bool y
-
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
-config GENERIC_IRQ_PROBE
-	def_bool y
-
-config GENERIC_PENDING_IRQ
-	def_bool y
-	depends on GENERIC_HARDIRQS && SMP
-
 config SEMAPHORE_SLEEPERS
 	def_bool y
 
@@ -97,26 +106,6 @@
 	select HVC_DRIVER
 	def_bool y
 
-config TILE
-	def_bool y
-	select HAVE_KVM if !TILEGX
-	select GENERIC_FIND_FIRST_BIT
-	select GENERIC_FIND_NEXT_BIT
-	select USE_GENERIC_SMP_HELPERS
-	select CC_OPTIMIZE_FOR_SIZE
-
-# FIXME: investigate whether we need/want these options.
-#	select HAVE_IOREMAP_PROT
-#       select HAVE_OPTPROBES
-#       select HAVE_REGS_AND_STACK_ACCESS_API
-#       select HAVE_HW_BREAKPOINT
-#       select PERF_EVENTS
-#       select HAVE_USER_RETURN_NOTIFIER
-#       config NO_BOOTMEM
-#       config ARCH_SUPPORTS_DEBUG_PAGEALLOC
-#       config HUGETLB_PAGE_SIZE_VARIABLE
-
-
 # Please note: TILE-Gx support is not yet finalized; this is
 # the preliminary support.  TILE-Gx drivers are only provided
 # with the alpha or beta test versions for Tilera customers.
@@ -220,7 +209,7 @@
 
 choice
 	depends on !TILEGX
-	prompt "Memory split" if EMBEDDED
+	prompt "Memory split" if EXPERT
 	default VMSPLIT_3G
 	---help---
 	  Select the desired split between kernel and user memory.
diff --git a/arch/tile/Kconfig.debug b/arch/tile/Kconfig.debug
index a81f0fb..9bc161a 100644
--- a/arch/tile/Kconfig.debug
+++ b/arch/tile/Kconfig.debug
@@ -3,7 +3,7 @@
 source "lib/Kconfig.debug"
 
 config EARLY_PRINTK
-	bool "Early printk" if EMBEDDED && DEBUG_KERNEL
+	bool "Early printk" if EXPERT && DEBUG_KERNEL
 	default y
 	help
 	  Write kernel log output directly via the hypervisor console.
diff --git a/arch/tile/configs/tile_defconfig b/arch/tile/configs/tile_defconfig
index 919c54a..0fe54445 100644
--- a/arch/tile/configs/tile_defconfig
+++ b/arch/tile/configs/tile_defconfig
@@ -3,7 +3,7 @@
 CONFIG_SYSVIPC=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE="usr/contents.txt"
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
 CONFIG_MODULES=y
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index 049d048..e351e14 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -3,14 +3,10 @@
 	option defconfig_list
 	default "arch/$ARCH/defconfig"
 
-# UML uses the generic IRQ subsystem
-config GENERIC_HARDIRQS
-	bool
-	default y
-
 config UML
 	bool
 	default y
+	select HAVE_GENERIC_HARDIRQS
 
 config MMU
 	bool
diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig.um
index f8d1d0d..90a438a 100644
--- a/arch/um/Kconfig.um
+++ b/arch/um/Kconfig.um
@@ -120,9 +120,6 @@
 
 	  If you don't know what to do, say N.
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-	def_bool y
-
 config NR_CPUS
 	int "Maximum number of CPUs (2-32)"
 	range 2 32
diff --git a/arch/um/defconfig b/arch/um/defconfig
index 564f3de..9f7634f 100644
--- a/arch/um/defconfig
+++ b/arch/um/defconfig
@@ -133,7 +133,7 @@
 # CONFIG_BLK_DEV_INITRD is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
-# CONFIG_EMBEDDED is not set
+# CONFIG_EXPERT is not set
 CONFIG_UID16=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 3ed5ad9..d5ed94d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -627,11 +627,11 @@
          as it is off-chip. APB timers are always running regardless of CPU
          C states, they are used as per CPU clockevent device when possible.
 
-# Mark as embedded because too many people got it wrong.
+# Mark as expert because too many people got it wrong.
 # The code disables itself when not needed.
 config DMI
 	default y
-	bool "Enable DMI scanning" if EMBEDDED
+	bool "Enable DMI scanning" if EXPERT
 	---help---
 	  Enabled scanning of DMI to identify machine quirks. Say Y
 	  here unless you have verified that your setup is not
@@ -639,7 +639,7 @@
 	  BIOS code.
 
 config GART_IOMMU
-	bool "GART IOMMU support" if EMBEDDED
+	bool "GART IOMMU support" if EXPERT
 	default y
 	select SWIOTLB
 	depends on X86_64 && PCI && AMD_NB
@@ -889,7 +889,7 @@
 	depends on X86_MCE_INTEL
 
 config VM86
-	bool "Enable VM86 support" if EMBEDDED
+	bool "Enable VM86 support" if EXPERT
 	default y
 	depends on X86_32
 	---help---
@@ -1073,7 +1073,7 @@
 
 choice
 	depends on EXPERIMENTAL
-	prompt "Memory split" if EMBEDDED
+	prompt "Memory split" if EXPERT
 	default VMSPLIT_3G
 	depends on X86_32
 	---help---
@@ -1135,7 +1135,7 @@
 	def_bool X86_64 || HIGHMEM64G
 
 config DIRECT_GBPAGES
-	bool "Enable 1GB pages for kernel pagetables" if EMBEDDED
+	bool "Enable 1GB pages for kernel pagetables" if EXPERT
 	default y
 	depends on X86_64
 	---help---
@@ -1369,7 +1369,7 @@
 
 config MTRR
 	def_bool y
-	prompt "MTRR (Memory Type Range Register) support" if EMBEDDED
+	prompt "MTRR (Memory Type Range Register) support" if EXPERT
 	---help---
 	  On Intel P6 family processors (Pentium Pro, Pentium II and later)
 	  the Memory Type Range Registers (MTRRs) may be used to control
@@ -1435,7 +1435,7 @@
 
 config X86_PAT
 	def_bool y
-	prompt "x86 PAT support" if EMBEDDED
+	prompt "x86 PAT support" if EXPERT
 	depends on MTRR
 	---help---
 	  Use PAT attributes to setup page level cache control.
@@ -1539,7 +1539,7 @@
 	  code in physical address mode via KEXEC
 
 config PHYSICAL_START
-	hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
+	hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
 	default "0x1000000"
 	---help---
 	  This gives the physical address where the kernel is loaded.
@@ -1934,7 +1934,7 @@
 	depends on X86_64 && PCI && ACPI
 
 config PCI_CNB20LE_QUIRK
-	bool "Read CNB20LE Host Bridge Windows" if EMBEDDED
+	bool "Read CNB20LE Host Bridge Windows" if EXPERT
 	default n
 	depends on PCI && EXPERIMENTAL
 	help
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 15588a0..283c5a6 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -424,7 +424,7 @@
 	depends on !(MK6 || MWINCHIPC6 || MWINCHIP3D || MCYRIXIII || M586MMX || M586TSC || M586 || M486 || M386) && !UML
 
 menuconfig PROCESSOR_SELECT
-	bool "Supported processor vendors" if EMBEDDED
+	bool "Supported processor vendors" if EXPERT
 	---help---
 	  This lets you choose what x86 vendor support code your kernel
 	  will include.
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 45143bb..615e188 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -31,7 +31,7 @@
 	  see errors. Disable this if you want silent bootup.
 
 config EARLY_PRINTK
-	bool "Early printk" if EMBEDDED
+	bool "Early printk" if EXPERT
 	default y
 	---help---
 	  Write kernel log output directly into the VGA buffer or to a serial
@@ -138,7 +138,7 @@
 
 config DOUBLEFAULT
 	default y
-	bool "Enable doublefault exception handler" if EMBEDDED
+	bool "Enable doublefault exception handler" if EXPERT
 	depends on X86_32
 	---help---
 	  This option allows trapping of rare doublefault exceptions that
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 5e3969c..3c89694 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -233,6 +233,7 @@
 extern void init_bsp_APIC(void);
 extern void setup_local_APIC(void);
 extern void end_local_APIC_setup(void);
+extern void bsp_end_local_APIC_setup(void);
 extern void init_apic_mappings(void);
 void register_lapic_address(unsigned long address);
 extern void setup_boot_APIC_clock(void);
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 63e35ec..62f0844 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -1,48 +1,8 @@
 #ifndef _ASM_X86_CACHEFLUSH_H
 #define _ASM_X86_CACHEFLUSH_H
 
-/* Keep includes the same across arches.  */
-#include <linux/mm.h>
-
 /* Caches aren't brain-dead on the intel. */
-static inline void flush_cache_all(void) { }
-static inline void flush_cache_mm(struct mm_struct *mm) { }
-static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
-static inline void flush_cache_range(struct vm_area_struct *vma,
-				     unsigned long start, unsigned long end) { }
-static inline void flush_cache_page(struct vm_area_struct *vma,
-				    unsigned long vmaddr, unsigned long pfn) { }
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-static inline void flush_dcache_page(struct page *page) { }
-static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
-static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
-static inline void flush_icache_range(unsigned long start,
-				      unsigned long end) { }
-static inline void flush_icache_page(struct vm_area_struct *vma,
-				     struct page *page) { }
-static inline void flush_icache_user_range(struct vm_area_struct *vma,
-					   struct page *page,
-					   unsigned long addr,
-					   unsigned long len) { }
-static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
-static inline void flush_cache_vunmap(unsigned long start,
-				      unsigned long end) { }
-
-static inline void copy_to_user_page(struct vm_area_struct *vma,
-				     struct page *page, unsigned long vaddr,
-				     void *dst, const void *src,
-				     unsigned long len)
-{
-	memcpy(dst, src, len);
-}
-
-static inline void copy_from_user_page(struct vm_area_struct *vma,
-				       struct page *page, unsigned long vaddr,
-				       void *dst, const void *src,
-				       unsigned long len)
-{
-	memcpy(dst, src, len);
-}
+#include <asm-generic/cacheflush.h>
 
 #ifdef CONFIG_X86_PAT
 /*
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 4fab24d..4564c8e 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -32,5 +32,6 @@
 
 DECLARE_PER_CPU(int, cpu_state);
 
+int mwait_usable(const struct cpuinfo_x86 *);
 
 #endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index f52d42e..574dbc2 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -14,7 +14,7 @@
 	do {							\
 		asm goto("1:"					\
 			JUMP_LABEL_INITIAL_NOP			\
-			".pushsection __jump_table,  \"a\" \n\t"\
+			".pushsection __jump_table,  \"aw\" \n\t"\
 			_ASM_PTR "1b, %l[" #label "], %c0 \n\t" \
 			".popsection \n\t"			\
 			: :  "i" (key) :  : label);		\
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 4a2d4e0..8b5393e 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -36,8 +36,6 @@
 	unsigned cpu = smp_processor_id();
 
 	if (likely(prev != next)) {
-		/* stop flush ipis for the previous mm */
-		cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		percpu_write(cpu_tlbstate.active_mm, next);
@@ -47,6 +45,9 @@
 		/* Re-load page tables */
 		load_cr3(next->pgd);
 
+		/* stop flush ipis for the previous mm */
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+
 		/*
 		 * load the LDT, if the LDT is different:
 		 */
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h
index a372290..b0ef2b4 100644
--- a/arch/x86/include/asm/numa_32.h
+++ b/arch/x86/include/asm/numa_32.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_NUMA_32_H
 #define _ASM_X86_NUMA_32_H
 
+extern int numa_off;
+
 extern int pxm_to_nid(int pxm);
 extern void numa_remove_cpu(int cpu);
 
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index 5ae8728..0493be3 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -40,6 +40,7 @@
 #ifdef CONFIG_NUMA_EMU
 #define FAKE_NODE_MIN_SIZE	((u64)32 << 20)
 #define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1UL))
+void numa_emu_cmdline(char *);
 #endif /* CONFIG_NUMA_EMU */
 #else
 static inline void init_cpu_to_node(void)		{ }
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 2071a8b..ebbc4d8 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -558,13 +558,12 @@
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t pmd)
 {
-#if PAGETABLE_LEVELS >= 3
 	if (sizeof(pmdval_t) > sizeof(long))
 		/* 5 arg words */
 		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
 	else
-		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp, pmd.pmd);
-#endif
+		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
+			    native_pmd_val(pmd));
 }
 #endif
 
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 8ee4516..7e17295 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -273,34 +273,34 @@
 	typeof(var) pxo_new__ = (nval);					\
 	switch (sizeof(var)) {						\
 	case 1:								\
-		asm("\n1:mov "__percpu_arg(1)",%%al"			\
-		    "\n\tcmpxchgb %2, "__percpu_arg(1)			\
+		asm("\n\tmov "__percpu_arg(1)",%%al"			\
+		    "\n1:\tcmpxchgb %2, "__percpu_arg(1)		\
 		    "\n\tjnz 1b"					\
-			    : "=a" (pxo_ret__), "+m" (var)		\
+			    : "=&a" (pxo_ret__), "+m" (var)		\
 			    : "q" (pxo_new__)				\
 			    : "memory");				\
 		break;							\
 	case 2:								\
-		asm("\n1:mov "__percpu_arg(1)",%%ax"			\
-		    "\n\tcmpxchgw %2, "__percpu_arg(1)			\
+		asm("\n\tmov "__percpu_arg(1)",%%ax"			\
+		    "\n1:\tcmpxchgw %2, "__percpu_arg(1)		\
 		    "\n\tjnz 1b"					\
-			    : "=a" (pxo_ret__), "+m" (var)		\
+			    : "=&a" (pxo_ret__), "+m" (var)		\
 			    : "r" (pxo_new__)				\
 			    : "memory");				\
 		break;							\
 	case 4:								\
-		asm("\n1:mov "__percpu_arg(1)",%%eax"			\
-		    "\n\tcmpxchgl %2, "__percpu_arg(1)			\
+		asm("\n\tmov "__percpu_arg(1)",%%eax"			\
+		    "\n1:\tcmpxchgl %2, "__percpu_arg(1)		\
 		    "\n\tjnz 1b"					\
-			    : "=a" (pxo_ret__), "+m" (var)		\
+			    : "=&a" (pxo_ret__), "+m" (var)		\
 			    : "r" (pxo_new__)				\
 			    : "memory");				\
 		break;							\
 	case 8:								\
-		asm("\n1:mov "__percpu_arg(1)",%%rax"			\
-		    "\n\tcmpxchgq %2, "__percpu_arg(1)			\
+		asm("\n\tmov "__percpu_arg(1)",%%rax"			\
+		    "\n1:\tcmpxchgq %2, "__percpu_arg(1)		\
 		    "\n\tjnz 1b"					\
-			    : "=a" (pxo_ret__), "+m" (var)		\
+			    : "=&a" (pxo_ret__), "+m" (var)		\
 			    : "r" (pxo_new__)				\
 			    : "memory");				\
 		break;							\
@@ -414,8 +414,6 @@
 #define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #define irqsafe_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
@@ -432,8 +430,6 @@
 #define irqsafe_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define irqsafe_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define irqsafe_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #ifndef CONFIG_M386
 #define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
@@ -475,11 +471,15 @@
 #define this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
 #define this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #define irqsafe_cpu_add_8(pcp, val)	percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_8(pcp, val)	percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
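
The percpu_xchg_op() change marks the accumulator output as earlyclobber ("=&a") so the compiler cannot place the new value in the same register, and moves the "1:" label after the initial load so a failed cmpxchg retries only the compare-exchange (on x86 a failed cmpxchg already leaves the current memory value in the accumulator). The portable sketch below shows the same exchange-by-compare-and-swap retry shape using GCC/Clang __atomic builtins rather than the segment-prefixed per-CPU asm; it is an illustration of the loop structure, not the kernel macro.

#include <stdio.h>

/*
 * Exchange implemented as a compare-and-swap retry loop: read the old
 * value once, then retry the CAS until it succeeds.  On failure,
 * __atomic_compare_exchange_n() updates "old" with the freshly observed
 * value, so no explicit reload is needed inside the loop.
 */
static unsigned long xchg_via_cas(unsigned long *p, unsigned long newval)
{
	unsigned long old = __atomic_load_n(p, __ATOMIC_RELAXED);

	while (!__atomic_compare_exchange_n(p, &old, newval, 0,
					    __ATOMIC_SEQ_CST,
					    __ATOMIC_RELAXED))
		;	/* "old" now holds the current value; try again */

	return old;
}

int main(void)
{
	unsigned long v = 5;
	unsigned long prev = xchg_via_cas(&v, 9);

	printf("prev=%lu now=%lu\n", prev, v);
	return 0;
}
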
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4c2f63c..1f46951 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -40,10 +40,7 @@
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
 
 /* Static state in head.S used to set up a CPU */
-extern struct {
-	void *sp;
-	unsigned short ss;
-} stack_start;
+extern unsigned long stack_start; /* Initial stack pointer address */
 
 struct smp_ops {
 	void (*smp_prepare_boot_cpu)(void);
diff --git a/arch/x86/include/asm/system_64.h b/arch/x86/include/asm/system_64.h
deleted file mode 100644
index 1159e09..0000000
--- a/arch/x86/include/asm/system_64.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef _ASM_X86_SYSTEM_64_H
-#define _ASM_X86_SYSTEM_64_H
-
-#include <asm/segment.h>
-#include <asm/cmpxchg.h>
-
-
-static inline unsigned long read_cr8(void)
-{
-	unsigned long cr8;
-	asm volatile("movq %%cr8,%0" : "=r" (cr8));
-	return cr8;
-}
-
-static inline void write_cr8(unsigned long val)
-{
-	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
-}
-
-#include <linux/irqflags.h>
-
-#endif /* _ASM_X86_SYSTEM_64_H */
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 69fd72a..68d1537 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -12,10 +12,8 @@
 #include <linux/cpumask.h>
 #include <asm/segment.h>
 #include <asm/desc.h>
-
-#ifdef CONFIG_X86_32
 #include <asm/pgtable.h>
-#endif
+#include <asm/cacheflush.h>
 
 #include "realmode/wakeup.h"
 #include "sleep.h"
@@ -100,7 +98,7 @@
 #else /* CONFIG_64BIT */
 	header->trampoline_segment = setup_trampoline() >> 4;
 #ifdef CONFIG_SMP
-	stack_start.sp = temp_stack + sizeof(temp_stack);
+	stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
 	early_gdt_descr.address =
 			(unsigned long)get_cpu_gdt_table(smp_processor_id());
 	initial_gs = per_cpu_offset(smp_processor_id());
@@ -149,6 +147,15 @@
 	memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
 }
 
+int __init acpi_configure_wakeup_memory(void)
+{
+	if (acpi_realmode)
+		set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT);
+
+	return 0;
+}
+arch_initcall(acpi_configure_wakeup_memory);
+
 
 static int __init acpi_sleep_setup(char *str)
 {
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 1236085..7038b95 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -671,7 +671,7 @@
 
 	atomic_set(&stop_machine_first, 1);
 	wrote_text = 0;
-	stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
+	__stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
 }
 
 #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 06c196d..76b96d7 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1381,12 +1381,17 @@
 #endif
 
 	apic_pm_activate();
+}
+
+void __init bsp_end_local_APIC_setup(void)
+{
+	end_local_APIC_setup();
 
 	/*
 	 * Now that local APIC setup is completed for BP, configure the fault
 	 * handling for interrupt remapping.
 	 */
-	if (!smp_processor_id() && intr_remapping_enabled)
+	if (intr_remapping_enabled)
 		enable_drhd_fault_handling();
 
 }
@@ -1756,7 +1761,7 @@
 		enable_IO_APIC();
 #endif
 
-	end_local_APIC_setup();
+	bsp_end_local_APIC_setup();
 
 #ifdef CONFIG_X86_IO_APIC
 	if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 697dc34..ca9e2a35 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -4002,6 +4002,9 @@
 {
 	int i = 0;
 
+	if (nr_ioapics == 0)
+		return -1;
+
 	/* Find the IOAPIC that manages this GSI. */
 	for (i = 0; i < nr_ioapics; i++) {
 		if ((gsi >= mp_gsi_routing[i].gsi_base)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 7283e98..ec2c19a 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -45,6 +45,7 @@
 	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
 	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
+	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
 	{ 0x21, LVL_2,      256 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x22, LVL_3,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
 	{ 0x23, LVL_3,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
@@ -66,6 +67,7 @@
 	{ 0x45, LVL_2,      MB(2) },	/* 4-way set assoc, 32 byte line size */
 	{ 0x46, LVL_3,      MB(4) },	/* 4-way set assoc, 64 byte line size */
 	{ 0x47, LVL_3,      MB(8) },	/* 8-way set assoc, 64 byte line size */
+	{ 0x48, LVL_2,      MB(3) },	/* 12-way set assoc, 64 byte line size */
 	{ 0x49, LVL_3,      MB(4) },	/* 16-way set assoc, 64 byte line size */
 	{ 0x4a, LVL_3,      MB(6) },	/* 12-way set assoc, 64 byte line size */
 	{ 0x4b, LVL_3,      MB(8) },	/* 16-way set assoc, 64 byte line size */
@@ -87,6 +89,7 @@
 	{ 0x7c, LVL_2,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
 	{ 0x7d, LVL_2,      MB(2) },	/* 8-way set assoc, 64 byte line size */
 	{ 0x7f, LVL_2,      512 },	/* 2-way set assoc, 64 byte line size */
+	{ 0x80, LVL_2,      512 },	/* 8-way set assoc, 64 byte line size */
 	{ 0x82, LVL_2,      256 },	/* 8-way set assoc, 32 byte line size */
 	{ 0x83, LVL_2,      512 },	/* 8-way set assoc, 32 byte line size */
 	{ 0x84, LVL_2,      MB(1) },	/* 8-way set assoc, 32 byte line size */
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index e12246f..6f8c5e9 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -59,6 +59,7 @@
 
 /* Callback to handle core threshold interrupts */
 int (*platform_thermal_notify)(__u64 msr_val);
+EXPORT_SYMBOL(platform_thermal_notify);
 
 static DEFINE_PER_CPU(struct thermal_state, thermal_state);
 
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 01c0f3e..bebabec 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -793,13 +793,21 @@
 }
 
 /*
- * MTRR initialization for all AP's
+ * Delayed MTRR initialization for all APs
  */
 void mtrr_aps_init(void)
 {
 	if (!use_intel())
 		return;
 
+	/*
+	 * Check if someone has requested the delay of AP MTRR initialization,
+	 * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
+	 * then we are done.
+	 */
+	if (!mtrr_aps_delayed_init)
+		return;
+
 	set_mtrr(~0U, 0, 0, 0);
 	mtrr_aps_delayed_init = false;
 }
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index e56b9bf..f7a0993 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -682,7 +682,7 @@
 	 * if an event is shared across the logical threads
 	 * the user needs special permissions to be able to use it
 	 */
-	if (p4_event_bind_map[v].shared) {
+	if (p4_ht_active() && p4_event_bind_map[v].shared) {
 		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
 			return -EACCES;
 	}
@@ -727,7 +727,8 @@
 		event->hw.config = p4_set_ht_bit(event->hw.config);
 
 	if (event->attr.type == PERF_TYPE_RAW) {
-
+		struct p4_event_bind *bind;
+		unsigned int esel;
 		/*
 		 * Clear bits we reserve to be managed by kernel itself
 		 * and never allowed from a user space
@@ -743,6 +744,13 @@
 		 * bits since we keep additional info here (for cache events and etc)
 		 */
 		event->hw.config |= event->attr.config;
+		bind = p4_config_get_bind(event->attr.config);
+		if (!bind) {
+			rc = -EINVAL;
+			goto out;
+		}
+		esel = P4_OPCODE_ESEL(bind->opcode);
+		event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
 	}
 
 	rc = x86_setup_perfctr(event);
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 6410133..a6b6fcf 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -149,13 +149,13 @@
 	unsigned used = 0;
 	struct thread_info *tinfo;
 	int graph = 0;
+	unsigned long dummy;
 	unsigned long bp;
 
 	if (!task)
 		task = current;
 
 	if (!stack) {
-		unsigned long dummy;
 		stack = &dummy;
 		if (task && task != current)
 			stack = (unsigned long *)task->thread.sp;
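A small stand-alone sketch (hypothetical function, not kernel code) of the lifetime bug that hoisting `dummy` out of the `if (!stack)` block avoids: taking the address of a block-scoped variable and using the pointer after the block closes is undefined behaviour, because the storage may be reused.

#include <stdio.h>

/* Hypothetical example: may return a dangling pointer. */
static unsigned long *pick_stack(unsigned long *stack)
{
	if (!stack) {
		unsigned long dummy = 0;	/* lifetime ends at the closing brace */
		stack = &dummy;
	}
	return stack;	/* dangling whenever the caller passed NULL */
}

int main(void)
{
	printf("%p\n", (void *)pick_stack(NULL));
	return 0;
}

Declaring `dummy` at function scope, as the hunk does, keeps the storage alive for the rest of the stack walk.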
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index fc293dc..767d6c4 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -85,6 +85,8 @@
  */
 __HEAD
 ENTRY(startup_32)
+	movl pa(stack_start),%ecx
+	
 	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
 		us to not reload segments */
 	testb $(1<<6), BP_loadflags(%esi)
@@ -99,7 +101,9 @@
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
+	movl %eax,%ss
 2:
+	leal -__PAGE_OFFSET(%ecx),%esp
 
 /*
  * Clear BSS first so that there are no surprises...
@@ -145,8 +149,6 @@
  * _brk_end is set up to point to the first "safe" location.
  * Mappings are created both at virtual address 0 (identity mapping)
  * and PAGE_OFFSET for up to _end.
- *
- * Note that the stack is not yet set up!
  */
 #ifdef CONFIG_X86_PAE
 
@@ -282,6 +284,9 @@
 	movl %eax,%es
 	movl %eax,%fs
 	movl %eax,%gs
+	movl pa(stack_start),%ecx
+	movl %eax,%ss
+	leal -__PAGE_OFFSET(%ecx),%esp
 #endif /* CONFIG_SMP */
 default_entry:
 
@@ -347,8 +352,8 @@
 	movl %eax,%cr0		/* ..and set paging (PG) bit */
 	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
 1:
-	/* Set up the stack pointer */
-	lss stack_start,%esp
+	/* Shift the stack pointer to a virtual address */
+	addl $__PAGE_OFFSET, %esp
 
 /*
  * Initialize eflags.  Some BIOS's leave bits like NT set.  This would
@@ -360,9 +365,7 @@
 
 #ifdef CONFIG_SMP
 	cmpb $0, ready
-	jz  1f				/* Initial CPU cleans BSS */
-	jmp checkCPUtype
-1:
+	jnz checkCPUtype
 #endif /* CONFIG_SMP */
 
 /*
@@ -470,14 +473,7 @@
 
 	cld			# gcc2 wants the direction flag cleared at all times
 	pushl $0		# fake return address for unwinder
-#ifdef CONFIG_SMP
-	movb ready, %cl
 	movb $1, ready
-	cmpb $0,%cl		# the first CPU calls start_kernel
-	je   1f
-	movl (stack_start), %esp
-1:
-#endif /* CONFIG_SMP */
 	jmp *(initial_code)
 
 /*
@@ -670,15 +666,15 @@
 #endif
 
 .data
+.balign 4
 ENTRY(stack_start)
 	.long init_thread_union+THREAD_SIZE
-	.long __BOOT_DS
-
-ready:	.byte 0
 
 early_recursion_flag:
 	.long 0
 
+ready:	.byte 0
+
 int_msg:
 	.asciz "Unknown interrupt or fault at: %p %p %p\n"
 
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 52945da..387b6a0 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -367,7 +367,8 @@
 		if (irr  & (1 << (vector % 32))) {
 			irq = __this_cpu_read(vector_irq[vector]);
 
-			data = irq_get_irq_data(irq);
+			desc = irq_to_desc(irq);
+			data = &desc->irq_data;
 			raw_spin_lock(&desc->lock);
 			if (data->chip->irq_retrigger)
 				data->chip->irq_retrigger(data);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index d8286ed..ff45541 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -14,6 +14,7 @@
 #include <linux/utsname.h>
 #include <trace/events/power.h>
 #include <linux/hw_breakpoint.h>
+#include <asm/cpu.h>
 #include <asm/system.h>
 #include <asm/apic.h>
 #include <asm/syscalls.h>
@@ -91,21 +92,31 @@
 
 void show_regs_common(void)
 {
-	const char *board, *product;
+	const char *vendor, *product, *board;
 
-	board = dmi_get_system_info(DMI_BOARD_NAME);
-	if (!board)
-		board = "";
+	vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+	if (!vendor)
+		vendor = "";
 	product = dmi_get_system_info(DMI_PRODUCT_NAME);
 	if (!product)
 		product = "";
 
+	/* Board Name is optional */
+	board = dmi_get_system_info(DMI_BOARD_NAME);
+
 	printk(KERN_CONT "\n");
-	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
+	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
 		current->pid, current->comm, print_tainted(),
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
-		init_utsname()->version, board, product);
+		init_utsname()->version);
+	printk(KERN_CONT " ");
+	printk(KERN_CONT "%s %s", vendor, product);
+	if (board) {
+		printk(KERN_CONT "/");
+		printk(KERN_CONT "%s", board);
+	}
+	printk(KERN_CONT "\n");
 }
 
 void flush_thread(void)
@@ -505,7 +516,7 @@
 #define MWAIT_ECX_EXTENDED_INFO		0x01
 #define MWAIT_EDX_C1			0xf0
 
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+int mwait_usable(const struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
 
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 763df77..08776a9 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -638,7 +638,7 @@
 	 * target processor state.
 	 */
 	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
-			 (unsigned long)stack_start.sp);
+			 stack_start);
 
 	/*
 	 * Run STARTUP IPI loop.
@@ -785,7 +785,7 @@
 #endif
 	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
 	initial_code = (unsigned long)start_secondary;
-	stack_start.sp = (void *) c_idle.idle->thread.sp;
+	stack_start  = c_idle.idle->thread.sp;
 
 	/* start_ip had better be page-aligned! */
 	start_ip = setup_trampoline();
@@ -1060,7 +1060,7 @@
 
 		connect_bsp_APIC();
 		setup_local_APIC();
-		end_local_APIC_setup();
+		bsp_end_local_APIC_setup();
 		return -1;
 	}
 
@@ -1137,7 +1137,7 @@
 	if (!skip_ioapic_setup && nr_ioapics)
 		enable_IO_APIC();
 
-	end_local_APIC_setup();
+	bsp_end_local_APIC_setup();
 
 	map_cpu_to_logical_apicid();
 
@@ -1402,8 +1402,9 @@
 	unsigned int highest_subcstate = 0;
 	int i;
 	void *mwait_ptr;
+	struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
 
-	if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT))
+	if (!(cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)))
 		return;
 	if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
 		return;
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index b34ab80..bf47007 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -34,9 +34,11 @@
 #ifdef CONFIG_X86_32
 OUTPUT_ARCH(i386)
 ENTRY(phys_startup_32)
+jiffies = jiffies_64;
 #else
 OUTPUT_ARCH(i386:x86-64)
 ENTRY(phys_startup_64)
+jiffies_64 = jiffies;
 #endif
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
@@ -140,15 +142,6 @@
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 
 		DATA_DATA
-		/*
-		 * Workaround a binutils (2.20.51.0.12 to 2.21.51.0.3) bug.
-		 * This makes jiffies relocatable in such binutils
-		 */
-#ifdef CONFIG_X86_32
-		jiffies = jiffies_64;
-#else
-		jiffies_64 = jiffies;
-#endif
 		CONSTRUCTORS
 
 		/* rarely changed data like cpu maps */
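The `jiffies = jiffies_64` / `jiffies_64 = jiffies` lines that this hunk moves to the top of the linker script make the two symbols share one address; a little-endian 32-bit read then sees the low half of the 64-bit counter. A stand-alone sketch of that overlay, with assumed values:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint64_t jiffies_64 = 0x1122334455667788ULL;	/* assumed value */
	uint32_t jiffies;

	/* Read the first four bytes at the same address; on little-endian
	 * x86 that is the low half of the 64-bit counter. */
	memcpy(&jiffies, &jiffies_64, sizeof(jiffies));
	printf("%#x\n", jiffies);	/* prints 0x55667788 */
	return 0;
}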
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 25bd1bc..54ce246 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1150,8 +1150,8 @@
 	kvm_load_ldt(svm->host.ldt);
 #ifdef CONFIG_X86_64
 	loadsegment(fs, svm->host.fs);
-	load_gs_index(svm->host.gs);
 	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+	load_gs_index(svm->host.gs);
 #else
 	loadsegment(gs, svm->host.gs);
 #endif
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
index 3871804..6e121a2 100644
--- a/arch/x86/lguest/Kconfig
+++ b/arch/x86/lguest/Kconfig
@@ -2,6 +2,7 @@
 	bool "Lguest guest support"
 	select PARAVIRT
 	depends on X86_32
+	select VIRTUALIZATION
 	select VIRTIO
 	select VIRTIO_RING
 	select VIRTIO_CONSOLE
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 4996cf5..eba687f0 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -824,7 +824,7 @@
 
 	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
 		/* Some systems map "vectors" to interrupts weirdly.  Not us! */
-		__get_cpu_var(vector_irq)[i] = i - FIRST_EXTERNAL_VECTOR;
+		__this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR);
 		if (i != SYSCALL_VECTOR)
 			set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
 	}
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 787c52c..ebf6d78 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -2,6 +2,28 @@
 #include <linux/topology.h>
 #include <linux/module.h>
 #include <linux/bootmem.h>
+#include <asm/numa.h>
+#include <asm/acpi.h>
+
+int __initdata numa_off;
+
+static __init int numa_setup(char *opt)
+{
+	if (!opt)
+		return -EINVAL;
+	if (!strncmp(opt, "off", 3))
+		numa_off = 1;
+#ifdef CONFIG_NUMA_EMU
+	if (!strncmp(opt, "fake=", 5))
+		numa_emu_cmdline(opt + 5);
+#endif
+#ifdef CONFIG_ACPI_NUMA
+	if (!strncmp(opt, "noacpi", 6))
+		acpi_numa = -1;
+#endif
+	return 0;
+}
+early_param("numa", numa_setup);
 
 /*
  * Which logical CPUs are on which nodes
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 1e72102..95ea155 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -30,7 +30,6 @@
 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 };
 
-int numa_off __initdata;
 static unsigned long __initdata nodemap_addr;
 static unsigned long __initdata nodemap_size;
 
@@ -263,6 +262,11 @@
 static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
 static char *cmdline __initdata;
 
+void __init numa_emu_cmdline(char *str)
+{
+	cmdline = str;
+}
+
 static int __init setup_physnodes(unsigned long start, unsigned long end,
 					int acpi, int amd)
 {
@@ -670,24 +674,6 @@
 	return pages;
 }
 
-static __init int numa_setup(char *opt)
-{
-	if (!opt)
-		return -EINVAL;
-	if (!strncmp(opt, "off", 3))
-		numa_off = 1;
-#ifdef CONFIG_NUMA_EMU
-	if (!strncmp(opt, "fake=", 5))
-		cmdline = opt + 5;
-#endif
-#ifdef CONFIG_ACPI_NUMA
-	if (!strncmp(opt, "noacpi", 6))
-		acpi_numa = -1;
-#endif
-	return 0;
-}
-early_param("numa", numa_setup);
-
 #ifdef CONFIG_NUMA
 
 static __init int find_near_online_node(int node)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8b830ca..d343b3c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -256,7 +256,6 @@
 				   unsigned long pfn)
 {
 	pgprot_t forbidden = __pgprot(0);
-	pgprot_t required = __pgprot(0);
 
 	/*
 	 * The BIOS area between 640k and 1Mb needs to be executable for
@@ -282,12 +281,6 @@
 	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
 		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
 		pgprot_val(forbidden) |= _PAGE_RW;
-	/*
-	 * .data and .bss should always be writable.
-	 */
-	if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
-	    within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
-		pgprot_val(required) |= _PAGE_RW;
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
 	/*
@@ -327,7 +320,6 @@
 #endif
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
-	prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
 
 	return prot;
 }
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index f164345..ae96e7b 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -59,7 +59,6 @@
 static int __initdata num_memory_chunks; /* total number of memory chunks */
 static u8 __initdata apicid_to_pxm[MAX_APICID];
 
-int numa_off __initdata;
 int acpi_numa __initdata;
 
 static __init void bad_srat(void)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 7e8d3bc..50542ef 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1194,7 +1194,7 @@
 	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
 
 	local_irq_disable();
-	early_boot_irqs_off();
+	early_boot_irqs_disabled = true;
 
 	memblock_init();
 
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 9d30105..6a6fe89 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -126,7 +126,7 @@
 #endif
 };
 
-void __init xen_init_irq_ops()
+void __init xen_init_irq_ops(void)
 {
 	pv_irq_ops = xen_irq_ops;
 	x86_init.irqs.intr_init = xen_init_IRQ;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8f2251d..fd12d7c 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -237,6 +237,18 @@
 			p2m_top[topidx] = mid;
 		}
 
+		/*
+		 * As long as the mfn_list has enough entries to completely
+		 * fill a p2m page, pointing into the array is ok. But if
+		 * not, the entries beyond the last pfn will be undefined.
+		 */
+		if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
+			unsigned long p2midx;
+
+			p2midx = max_pfn % P2M_PER_PAGE;
+			for ( ; p2midx < P2M_PER_PAGE; p2midx++)
+				mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
+		}
 		p2m_top[topidx][mididx] = &mfn_list[pfn];
 	}
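A worked example of the boundary arithmetic behind the new tail-fill, with assumed numbers (P2M_PER_PAGE = 512 entries per page, max_pfn = 1000):

#include <stdio.h>

#define P2M_PER_PAGE 512UL	/* assumed: 4096-byte page / 8-byte entries */

int main(void)
{
	unsigned long max_pfn = 1000;	/* assumed end of populated memory */
	unsigned long pfn = 512;	/* start of the final, partial chunk */

	if (pfn + P2M_PER_PAGE > max_pfn) {
		unsigned long p2midx = max_pfn % P2M_PER_PAGE;	/* 488 */

		/* Entries 488..511 (pfn 1000..1023) would otherwise point past
		 * the end of mfn_list[]; they are filled with INVALID_P2M_ENTRY. */
		printf("fill entries %lu..%lu\n", p2midx, P2M_PER_PAGE - 1);
	}
	return 0;
}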
 
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b5a7f92..a8a66a5 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -179,8 +179,13 @@
 	e820.nr_map = 0;
 	xen_extra_mem_start = mem_end;
 	for (i = 0; i < memmap.nr_entries; i++) {
-		unsigned long long end = map[i].addr + map[i].size;
+		unsigned long long end;
 
+		/* Guard against non-page aligned E820 entries. */
+		if (map[i].type == E820_RAM)
+			map[i].size -= (map[i].size + map[i].addr) % PAGE_SIZE;
+
+		end = map[i].addr + map[i].size;
 		if (map[i].type == E820_RAM && end > mem_end) {
 			/* RAM off the end - may be partially included */
 			u64 delta = min(map[i].size, end - mem_end);
@@ -350,6 +355,7 @@
 	boot_cpu_data.hlt_works_ok = 1;
 #endif
 	pm_idle = default_idle;
+	boot_option_idle_override = IDLE_HALT;
 
 	fiddle_vdso();
 }
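A worked example of the new alignment guard, with assumed E820 values: the RAM entry's size is trimmed so that addr + size lands on a page boundary before it is compared against mem_end.

#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
	/* Assumed, non-page-aligned E820_RAM entry (illustration only). */
	unsigned long long addr = 0x100000;	/* 1 MiB */
	unsigned long long size = 0x3ff800;	/* end = 0x4ff800, not aligned */

	size -= (size + addr) % PAGE_SIZE;	/* trims 0x800 bytes */

	printf("end = %#llx\n", addr + size);	/* 0x4ff000, a 4 KiB boundary */
	return 0;
}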
diff --git a/arch/xtensa/configs/common_defconfig b/arch/xtensa/configs/common_defconfig
index 1d230ee..b90038e 100644
--- a/arch/xtensa/configs/common_defconfig
+++ b/arch/xtensa/configs/common_defconfig
@@ -32,7 +32,7 @@
 # CONFIG_HOTPLUG is not set
 CONFIG_KOBJECT_UEVENT=y
 # CONFIG_IKCONFIG is not set
-# CONFIG_EMBEDDED is not set
+# CONFIG_EXPERT is not set
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig
index 7368164..0234cd1 100644
--- a/arch/xtensa/configs/iss_defconfig
+++ b/arch/xtensa/configs/iss_defconfig
@@ -55,7 +55,7 @@
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_SYSCTL=y
 CONFIG_ANON_INODES=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_ALL is not set
diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig
index bb84fbc..095cd80 100644
--- a/arch/xtensa/configs/s6105_defconfig
+++ b/arch/xtensa/configs/s6105_defconfig
@@ -55,7 +55,7 @@
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
-CONFIG_EMBEDDED=y
+CONFIG_EXPERT=y
 CONFIG_SYSCTL_SYSCALL=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_ALL is not set
diff --git a/block/Kconfig b/block/Kconfig
index 6c9213e..60be1e0 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -2,7 +2,7 @@
 # Block layer core configuration
 #
 menuconfig BLOCK
-       bool "Enable the block layer" if EMBEDDED
+       bool "Enable the block layer" if EXPERT
        default y
        help
 	 Provide block layer support for the kernel.
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 381b09b..a89043a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -168,7 +168,15 @@
 	 * tree of blkg (instead of traversing through hash list all
 	 * the time.
 	 */
-	tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
+
+	/*
+	 * This is the common case when there are no blkio cgroups.
+ 	 * Avoid lookup in this case
+ 	 */
+	if (blkcg == &blkio_root_cgroup)
+		tg = &td->root_tg;
+	else
+		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
 
 	/* Fill in device details for root group */
 	if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 501ffdf..7be4c79 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -599,7 +599,7 @@
 }
 
 static inline unsigned
-cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
 	if (cfqd->cfq_latency) {
@@ -631,7 +631,7 @@
 static inline void
 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	unsigned slice = cfq_scaled_group_slice(cfqd, cfqq);
+	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
 
 	cfqq->slice_start = jiffies;
 	cfqq->slice_end = jiffies + slice;
@@ -1671,7 +1671,7 @@
 	 */
 	if (timed_out) {
 		if (cfq_cfqq_slice_new(cfqq))
-			cfqq->slice_resid = cfq_scaled_group_slice(cfqd, cfqq);
+			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
 		else
 			cfqq->slice_resid = cfqq->slice_end - jiffies;
 		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
@@ -3432,6 +3432,10 @@
 {
 	struct cfq_io_context *cic = cfqd->active_cic;
 
+	/* If the queue already has requests, don't wait */
+	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
+		return false;
+
 	/* If there are other queues in the group, don't wait */
 	if (cfqq->cfqg->nr_cfqq > 1)
 		return false;
diff --git a/drivers/Makefile b/drivers/Makefile
index 7eb35f4..b423bb1 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -24,7 +24,7 @@
 # regulators early, since some subsystems rely on them to initialize
 obj-$(CONFIG_REGULATOR)		+= regulator/
 
-# char/ comes before serial/ etc so that the VT console is the boot-time
+# tty/ comes before char/ so that the VT console is the boot-time
 # default.
 obj-y				+= tty/
 obj-y				+= char/
@@ -38,7 +38,6 @@
 obj-$(CONFIG_FB_I810)           += video/i810/
 obj-$(CONFIG_FB_INTEL)          += video/intelfb/
 
-obj-y				+= serial/
 obj-$(CONFIG_PARPORT)		+= parport/
 obj-y				+= base/ block/ misc/ mfd/ nfc/
 obj-$(CONFIG_NUBUS)		+= nubus/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 10c7ad5..2aa042a 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -318,7 +318,7 @@
 	  the module will be called pci_slot.
 
 config X86_PM_TIMER
-	bool "Power Management Timer Support" if EMBEDDED
+	bool "Power Management Timer Support" if EXPERT
 	depends on X86
 	default y
 	help
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index 3e50c74..e0ba17f 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index b17d8de..ab87396 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index 72e9d5e..eb0b1f8 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 894a0ff..666271b 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 70e0b28..41d247d 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 0e4dba0..82a1bd2 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 258d628..e7213be 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 049e203..3731e1c 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 74000f5..54784bb 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index 8d5c9e0..b7491ee 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index d44d3bc..79a598c 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 962a3cc..1055769 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -97,8 +97,6 @@
 #define AOPOBJ_OBJECT_INITIALIZED   0x08	/* Region is initialized, _REG was run */
 #define AOPOBJ_SETUP_COMPLETE       0x10	/* Region setup is complete */
 #define AOPOBJ_INVALID              0x20	/* Host OS won't allow a Region address */
-#define AOPOBJ_MODULE_LEVEL         0x40	/* Method is actually module-level code */
-#define AOPOBJ_MODIFIED_NAMESPACE   0x80	/* Method modified the namespace */
 
 /******************************************************************************
  *
@@ -175,7 +173,7 @@
 };
 
 struct acpi_object_method {
-	ACPI_OBJECT_COMMON_HEADER u8 method_flags;
+	ACPI_OBJECT_COMMON_HEADER u8 info_flags;
 	u8 param_count;
 	u8 sync_level;
 	union acpi_operand_object *mutex;
@@ -183,13 +181,21 @@
 	union {
 		ACPI_INTERNAL_METHOD implementation;
 		union acpi_operand_object *handler;
-	} extra;
+	} dispatch;
 
 	u32 aml_length;
 	u8 thread_count;
 	acpi_owner_id owner_id;
 };
 
+/* Flags for info_flags field above */
+
+#define ACPI_METHOD_MODULE_LEVEL        0x01	/* Method is actually module-level code */
+#define ACPI_METHOD_INTERNAL_ONLY       0x02	/* Method is implemented internally (_OSI) */
+#define ACPI_METHOD_SERIALIZED          0x04	/* Method is serialized */
+#define ACPI_METHOD_SERIALIZED_PENDING  0x08	/* Method is to be marked serialized */
+#define ACPI_METHOD_MODIFIED_NAMESPACE  0x10	/* Method modified the namespace */
+
 /******************************************************************************
  *
  * Objects that can be notified.  All share a common notify_info area.
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index 8c15ff4..bb2ccfa 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index d0bb0fd..5ea1e06 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 10998d3..94e73c9 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index 528bcba..f08b55b 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 6e5dd97..1623b24 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 62a576e..967f081 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 72e4183..99c140d 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 1f484ba..f4f0998 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -480,16 +480,10 @@
 	AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D
 } AML_ACCESS_ATTRIBUTE;
 
-/* Bit fields in method_flags byte */
+/* Bit fields in the AML method_flags byte */
 
 #define AML_METHOD_ARG_COUNT        0x07
 #define AML_METHOD_SERIALIZED       0x08
 #define AML_METHOD_SYNC_LEVEL       0xF0
 
-/* METHOD_FLAGS_ARG_COUNT is not used internally, define additional flags */
-
-#define AML_METHOD_INTERNAL_ONLY    0x01
-#define AML_METHOD_RESERVED1        0x02
-#define AML_METHOD_RESERVED2        0x04
-
 #endif				/* __AMLCODE_H__ */
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 0e5798f..59122cd 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 347bee1..34be60c 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index cc4a38c..a7718bf 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index d94dd89..5d79775 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,6 @@
 
 #include <acpi/acpi.h>
 #include "accommon.h"
-#include "amlcode.h"
 #include "acdispat.h"
 #include "acinterp.h"
 #include "acnamesp.h"
@@ -201,7 +200,7 @@
 	/*
 	 * If this method is serialized, we need to acquire the method mutex.
 	 */
-	if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) {
+	if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
 		/*
 		 * Create a mutex for the method if it is defined to be Serialized
 		 * and a mutex has not already been created. We defer the mutex creation
@@ -413,8 +412,9 @@
 
 	/* Invoke an internal method if necessary */
 
-	if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
-		status = obj_desc->method.extra.implementation(next_walk_state);
+	if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
+		status =
+		    obj_desc->method.dispatch.implementation(next_walk_state);
 		if (status == AE_OK) {
 			status = AE_CTRL_TERMINATE;
 		}
@@ -579,11 +579,14 @@
 
 		/*
 		 * Delete any namespace objects created anywhere within the
-		 * namespace by the execution of this method. Unless this method
-		 * is a module-level executable code method, in which case we
-		 * want make the objects permanent.
+		 * namespace by the execution of this method. Unless:
+		 * 1) This method is a module-level executable code method, in which
+		 *    case we want to make the objects permanent.
+		 * 2) There are other threads executing the method, in which case we
+		 *    will wait until the last thread has completed.
 		 */
-		if (!(method_desc->method.flags & AOPOBJ_MODULE_LEVEL)) {
+		if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
+		    && (method_desc->method.thread_count == 1)) {
 
 			/* Delete any direct children of (created by) this method */
 
@@ -593,12 +596,17 @@
 			/*
 			 * Delete any objects that were created by this method
 			 * elsewhere in the namespace (if any were created).
+			 * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
+			 * deletion such that we don't have to perform an entire
+			 * namespace walk for every control method execution.
 			 */
 			if (method_desc->method.
-			    flags & AOPOBJ_MODIFIED_NAMESPACE) {
+			    info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) {
 				acpi_ns_delete_namespace_by_owner(method_desc->
 								  method.
 								  owner_id);
+				method_desc->method.info_flags &=
+				    ~ACPI_METHOD_MODIFIED_NAMESPACE;
 			}
 		}
 	}
@@ -629,19 +637,43 @@
 		 * Serialized if it appears that the method is incorrectly written and
 		 * does not support multiple thread execution. The best example of this
 		 * is if such a method creates namespace objects and blocks. A second
-		 * thread will fail with an AE_ALREADY_EXISTS exception
+		 * thread will fail with an AE_ALREADY_EXISTS exception.
 		 *
 		 * This code is here because we must wait until the last thread exits
-		 * before creating the synchronization semaphore.
+		 * before marking the method as serialized.
 		 */
-		if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED)
-		    && (!method_desc->method.mutex)) {
-			(void)acpi_ds_create_method_mutex(method_desc);
+		if (method_desc->method.
+		    info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
+			if (walk_state) {
+				ACPI_INFO((AE_INFO,
+					   "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
+					   walk_state->method_node->name.
+					   ascii));
+			}
+
+			/*
+			 * Method tried to create an object twice and was marked as
+			 * "pending serialized". The probable cause is that the method
+			 * cannot handle reentrancy.
+			 *
+			 * The method was created as not_serialized, but it tried to create
+			 * a named object and then blocked, causing the second thread
+			 * entrance to begin and then fail. Work around this problem by
+			 * marking the method permanently as Serialized when the last
+			 * thread exits here.
+			 */
+			method_desc->method.info_flags &=
+			    ~ACPI_METHOD_SERIALIZED_PENDING;
+			method_desc->method.info_flags |=
+			    ACPI_METHOD_SERIALIZED;
+			method_desc->method.sync_level = 0;
 		}
 
 		/* No more threads, we can free the owner_id */
 
-		if (!(method_desc->method.flags & AOPOBJ_MODULE_LEVEL)) {
+		if (!
+		    (method_desc->method.
+		     info_flags & ACPI_METHOD_MODULE_LEVEL)) {
 			acpi_ut_release_owner_id(&method_desc->method.owner_id);
 		}
 	}
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 8095306..905ce29 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 8e85f54..f42e17e 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index 7c0e742..bbecf29 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 15135c2..2c477ce 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 6b0b5d0..fe40e4c 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 140a9d0..52566ff 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index d1e7017..76a661f 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 83155dd..a6c374e 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index e5e313c..d458b04 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 7c339d3..14988a8 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -471,6 +471,7 @@
 
 	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
 	if (ACPI_FAILURE(status)) {
+		ACPI_FREE(local_gpe_event_info);
 		return_VOID;
 	}
 
@@ -478,6 +479,7 @@
 
 	if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
 		status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+		ACPI_FREE(local_gpe_event_info);
 		return_VOID;
 	}
 
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 9acb869..ca2c41a 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index c59dc23..ce9aa9f 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 10e4774..80a81d0 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 38bba66..7dc8094 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 98fd210..785a5ee 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 0b47a6d..9659cee 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -590,9 +590,9 @@
 				 * See acpi_ns_exec_module_code
 				 */
 				if (obj_desc->method.
-				    flags & AOPOBJ_MODULE_LEVEL) {
+				    info_flags & ACPI_METHOD_MODULE_LEVEL) {
 					handler_obj =
-					    obj_desc->method.extra.handler;
+					    obj_desc->method.dispatch.handler;
 				}
 				break;
 
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 8dfbaa9..2ebd40e 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 1226689..e114140 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 90488c1..c57b5c7 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 416845b..3b20a34 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -212,37 +212,40 @@
 		return_ACPI_STATUS(AE_BAD_PARAMETER);
 	}
 
-	/* Validate wake_device is of type Device */
-
-	device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
-	if (device_node->type != ACPI_TYPE_DEVICE) {
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
-	}
-
 	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
 
 	/* Ensure that we have a valid GPE number */
 
 	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
-	if (gpe_event_info) {
-		/*
-		 * If there is no method or handler for this GPE, then the
-		 * wake_device will be notified whenever this GPE fires (aka
-		 * "implicit notify") Note: The GPE is assumed to be
-		 * level-triggered (for windows compatibility).
-		 */
-		if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
-		    ACPI_GPE_DISPATCH_NONE) {
-			gpe_event_info->flags =
-			    (ACPI_GPE_DISPATCH_NOTIFY |
-			     ACPI_GPE_LEVEL_TRIGGERED);
-			gpe_event_info->dispatch.device_node = device_node;
-		}
-
-		gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
-		status = AE_OK;
+	if (!gpe_event_info) {
+		goto unlock_and_exit;
 	}
 
+	/*
+	 * If there is no method or handler for this GPE, then the
+	 * wake_device will be notified whenever this GPE fires (aka
+	 * "implicit notify"). Note: The GPE is assumed to be
+	 * level-triggered (for Windows compatibility).
+	 */
+	if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
+	      ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) {
+
+		/* Validate wake_device is of type Device */
+
+		device_node = ACPI_CAST_PTR(struct acpi_namespace_node,
+					    wake_device);
+		if (device_node->type != ACPI_TYPE_DEVICE) {
+			goto unlock_and_exit;
+		}
+		gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
+					 ACPI_GPE_LEVEL_TRIGGERED);
+		gpe_event_info->dispatch.device_node = device_node;
+	}
+
+	gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
+	status = AE_OK;
+
+ unlock_and_exit:
 	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index ce9314f..eb73867 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 1883220..745a42b 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index b73bc50..74162a1 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 3c61b48..e7b372d 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -482,13 +482,11 @@
 	obj_desc->method.aml_length = aml_length;
 
 	/*
-	 * Disassemble the method flags. Split off the Arg Count
-	 * for efficiency
+	 * Disassemble the method flags. Split off the arg_count, Serialized
+	 * flag, and sync_level for efficiency.
 	 */
 	method_flags = (u8) operand[1]->integer.value;
 
-	obj_desc->method.method_flags =
-	    (u8) (method_flags & ~AML_METHOD_ARG_COUNT);
 	obj_desc->method.param_count =
 	    (u8) (method_flags & AML_METHOD_ARG_COUNT);
 
@@ -497,6 +495,8 @@
 	 * created for this method when it is parsed.
 	 */
 	if (method_flags & AML_METHOD_SERIALIZED) {
+		obj_desc->method.info_flags = ACPI_METHOD_SERIALIZED;
+
 		/*
 		 * ACPI 1.0: sync_level = 0
 		 * ACPI 2.0: sync_level = sync_level in method declaration
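
Both this hunk and the matching nsxfname.c hunk further down stop caching the raw AML flags byte and instead decode it into param_count, an info_flags bit and sync_level. The AML MethodFlags byte packs the argument count in bits 0-2, the serialize flag in bit 3 and the sync level in bits 4-7; a minimal stand-alone decode, using the usual ACPICA mask values:

	#include <stdint.h>

	#define AML_METHOD_ARG_COUNT	0x07	/* bits 0-2: number of arguments */
	#define AML_METHOD_SERIALIZED	0x08	/* bit 3: method is serialized */
	#define AML_METHOD_SYNC_LEVEL	0xF0	/* bits 4-7: sync level */

	struct method_info {
		uint8_t param_count;
		uint8_t serialized;
		uint8_t sync_level;
	};

	static struct method_info decode_method_flags(uint8_t method_flags)
	{
		struct method_info info = {
			.param_count = method_flags & AML_METHOD_ARG_COUNT,
			.serialized  = (method_flags & AML_METHOD_SERIALIZED) != 0,
			/* sync_level is only meaningful when the method is serialized */
			.sync_level  = (method_flags & AML_METHOD_SYNC_LEVEL) >> 4,
		};
		return info;
	}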
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index be8c98b..c7a2f1e 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index f067bbb..61b8c0e 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -122,7 +122,7 @@
 
 static struct acpi_exdump_info acpi_ex_dump_method[9] = {
 	{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL},
-	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.method_flags), "Method Flags"},
+	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.info_flags), "Info Flags"},
 	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count),
 	 "Parameter Count"},
 	{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"},
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index f17d2ff..0bde223 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 38293fd..6c79c29 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 95db4be0..703d88e 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 6af14e4..be1c56e 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index d11e539..49ec049 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 84e4d18..236ead1 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 10e104c..2571b4a 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 7a08d23..1b48d9d 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index 4b50730..f4a2787 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 7aae29f..cc95e20 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index de17e10..f0d5e14 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 1fa4289..55997e4 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index 7ca35ea..db502cd 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index 8c97cfd..e3bb00c 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 1624436..c0c8842 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index d4af684..a979017 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -7,7 +7,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index e972b66..dc665cc 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index 675aaa9..df66e7b 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 4093522..8ad9314 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index b44274a..fc380d3 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 85c3cbd..f610d88 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index ad21c7d..050fd22 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 5d1273b..55accb7 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -7,7 +7,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 3796811..2ac28bb 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 1ef8e0b..9c8eb71 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index e1d9c77..5f16058 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index 50cc3be..6f98d21 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index 0cd925b..d93172f 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -163,9 +163,9 @@
 #else
 				/* Mark this as a very SPECIAL method */
 
-				obj_desc->method.method_flags =
-				    AML_METHOD_INTERNAL_ONLY;
-				obj_desc->method.extra.implementation =
+				obj_desc->method.info_flags =
+				    ACPI_METHOD_INTERNAL_ONLY;
+				obj_desc->method.dispatch.implementation =
 				    acpi_ut_osi_implementation;
 #endif
 				break;
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 1e5ff80..1d0ef15 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -234,8 +234,8 @@
 			 * modified the namespace. This is used for cleanup when the
 			 * method exits.
 			 */
-			walk_state->method_desc->method.flags |=
-			    AOPOBJ_MODIFIED_NAMESPACE;
+			walk_state->method_desc->method.info_flags |=
+			    ACPI_METHOD_MODIFIED_NAMESPACE;
 		}
 	}
 
@@ -341,6 +341,7 @@
 {
 	struct acpi_namespace_node *child_node = NULL;
 	u32 level = 1;
+	acpi_status status;
 
 	ACPI_FUNCTION_TRACE(ns_delete_namespace_subtree);
 
@@ -348,6 +349,13 @@
 		return_VOID;
 	}
 
+	/* Lock namespace for possible update */
+
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		return_VOID;
+	}
+
 	/*
 	 * Traverse the tree of objects until we bubble back up
 	 * to where we started.
@@ -397,6 +405,7 @@
 		}
 	}
 
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 	return_VOID;
 }
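
The subtree-deletion hunk above (and the nsdump.c hunk just below) brackets the whole-tree walk with the namespace mutex so that a concurrent table load or unload cannot mutate the tree mid-traversal; the shape is simply acquire, bail out if that fails, walk, release:

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status))
		return_VOID;			/* cannot walk safely without the lock */

	/* ... depth-first walk that deletes (or dumps) every node in the subtree ... */

	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_VOID;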
 
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index a54dc39..b683cc2 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -624,9 +624,22 @@
 		     acpi_owner_id owner_id, acpi_handle start_handle)
 {
 	struct acpi_walk_info info;
+	acpi_status status;
 
 	ACPI_FUNCTION_ENTRY();
 
+	/*
+	 * Just lock the entire namespace for the duration of the dump.
+	 * We don't want any changes to the namespace during this time,
+	 * especially the temporary nodes since we are going to display
+	 * them also.
+	 */
+	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+	if (ACPI_FAILURE(status)) {
+		acpi_os_printf("Could not acquire namespace mutex\n");
+		return;
+	}
+
 	info.debug_level = ACPI_LV_TABLES;
 	info.owner_id = owner_id;
 	info.display_type = display_type;
@@ -636,6 +649,8 @@
 				     ACPI_NS_WALK_TEMP_NODES,
 				     acpi_ns_dump_one_object, NULL,
 				     (void *)&info, NULL);
+
+	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 }
 #endif				/* ACPI_FUTURE_USAGE */
 
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index d2a9792..2ed294b 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index f52829c..c1bd02b 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -389,7 +389,7 @@
 	 * acpi_gbl_root_node->Object is NULL at PASS1.
 	 */
 	if ((type == ACPI_TYPE_DEVICE) && parent_node->object) {
-		method_obj->method.extra.handler =
+		method_obj->method.dispatch.handler =
 		    parent_node->object->device.handler;
 	}
 
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 0cac7ec..fd7c638 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index df18be9..5f7dc69 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d3104af..d5fa520 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 41a9213..3bb8bf1 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index 5808c89..b3234fa 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 7096bcd..9fb03fa 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index d1c1366..1d76ac8 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 4ef9f43..973883b 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 41102a8..28b0d7a 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index a7d6ad9..cb1b104 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 2cd5be8..345f0c3 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index ebef8a7..c53f004 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index b01e45a..3fd4526 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -603,10 +603,9 @@
 	method_obj->method.param_count = (u8)
 	    (method_flags & AML_METHOD_ARG_COUNT);
 
-	method_obj->method.method_flags = (u8)
-	    (method_flags & ~AML_METHOD_ARG_COUNT);
-
 	if (method_flags & AML_METHOD_SERIALIZED) {
+		method_obj->method.info_flags = ACPI_METHOD_SERIALIZED;
+
 		method_obj->method.sync_level = (u8)
 		    ((method_flags & AML_METHOD_SYNC_LEVEL) >> 4);
 	}
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index a1f04e9..db7660f 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 7df1a4c..e1fad0e 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 2f2e776..01dd70d 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -655,7 +655,7 @@
 		method_obj->method.aml_start = aml_start;
 		method_obj->method.aml_length = aml_length;
 		method_obj->method.owner_id = owner_id;
-		method_obj->method.flags |= AOPOBJ_MODULE_LEVEL;
+		method_obj->method.info_flags |= ACPI_METHOD_MODULE_LEVEL;
 
 		/*
 		 * Save the parent node in next_object. This is cheating, but we
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index 2b0c3be..bed08de 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 8d81542..9bb0cbd 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -55,7 +55,6 @@
 #include "acparser.h"
 #include "acdispat.h"
 #include "amlcode.h"
-#include "acnamesp.h"
 #include "acinterp.h"
 
 #define _COMPONENT          ACPI_PARSER
@@ -539,24 +538,16 @@
 			/* Check for possible multi-thread reentrancy problem */
 
 			if ((status == AE_ALREADY_EXISTS) &&
-			    (!walk_state->method_desc->method.mutex)) {
-				ACPI_INFO((AE_INFO,
-					   "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
-					   walk_state->method_node->name.
-					   ascii));
-
+			    (!(walk_state->method_desc->method.
+			       info_flags & ACPI_METHOD_SERIALIZED))) {
 				/*
-				 * Method tried to create an object twice. The probable cause is
-				 * that the method cannot handle reentrancy.
-				 *
-				 * The method is marked not_serialized, but it tried to create
-				 * a named object, causing the second thread entrance to fail.
-				 * Workaround this problem by marking the method permanently
-				 * as Serialized.
+				 * Method is not serialized and tried to create an object
+				 * twice. The probable cause is that the method cannot
+				 * handle reentrancy. Mark as "pending serialized" now, and
+				 * then mark "serialized" when the last thread exits.
 				 */
-				walk_state->method_desc->method.method_flags |=
-				    AML_METHOD_SERIALIZED;
-				walk_state->method_desc->method.sync_level = 0;
+				walk_state->method_desc->method.info_flags |=
+				    ACPI_METHOD_SERIALIZED_PENDING;
 			}
 		}
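
The reentrancy workaround above is now split in two: at parse time a method that hit AE_ALREADY_EXISTS while not serialized is only tagged ACPI_METHOD_SERIALIZED_PENDING, and the promotion to a genuinely serialized method is deferred until the last thread leaves it. The exit-side half is not part of this diff; a sketch of where it is assumed to live (the dispatcher's method-termination path, with thread_count as the per-method thread counter):

	/* Parse side (this hunk): remember that the method needs serialization. */
	if (status == AE_ALREADY_EXISTS &&
	    !(walk_state->method_desc->method.info_flags & ACPI_METHOD_SERIALIZED)) {
		walk_state->method_desc->method.info_flags |=
		    ACPI_METHOD_SERIALIZED_PENDING;
	}

	/* Exit side (assumed, elsewhere in the dispatcher): once no thread is
	 * still executing the method, make the serialization permanent. */
	if ((method_desc->method.info_flags & ACPI_METHOD_SERIALIZED_PENDING) &&
	    !method_desc->method.thread_count) {
		method_desc->method.info_flags &= ~ACPI_METHOD_SERIALIZED_PENDING;
		method_desc->method.info_flags |= ACPI_METHOD_SERIALIZED;
		method_desc->method.sync_level = 0;
	}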
 
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index 40e2b27..a5faa13 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index d4b970c..f1464c0 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index fe29eee..7eda785 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 8abb962..3312d63 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index c42f067..8086805 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,6 @@
 #include "acdispat.h"
 #include "acinterp.h"
 #include "actables.h"
-#include "amlcode.h"
 
 #define _COMPONENT          ACPI_PARSER
 ACPI_MODULE_NAME("psxface")
@@ -285,15 +284,15 @@
 		goto cleanup;
 	}
 
-	if (info->obj_desc->method.flags & AOPOBJ_MODULE_LEVEL) {
+	if (info->obj_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL) {
 		walk_state->parse_flags |= ACPI_PARSE_MODULE_LEVEL;
 	}
 
 	/* Invoke an internal method if necessary */
 
-	if (info->obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
+	if (info->obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
 		status =
-		    info->obj_desc->method.extra.implementation(walk_state);
+		    info->obj_desc->method.dispatch.implementation(walk_state);
 		info->return_object = walk_state->return_desc;
 
 		/* Cleanup states */
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 226c806..9e66f90 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index d6ebf7e..3a8a89e 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index c80a2ee..4ce6e11 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index f859b03..33db752 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index 1fd868b..f9ea608 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index 33bff17..0c7efef 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 545da40..50b8ad2 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 7335f22..1bfcef7 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index 887b8ba..7cc6d86 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index f8cd9e8..410264b 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 491191e..231811e 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 9f6a6e7..2ff657a 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index d2ff432..428d44e 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index 989d5c8..a55cb2b 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 83d7af8..48db094 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 34f9c2b..0f2d395 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 4a8b9e6..4b7085d 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index fd2c07d..7eb6c6c 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 8f08962..0a69735 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 6fef83f..aded299 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index f21c486..a9bcd81 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index ed794cd..31f5a78 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 22f59ef..18f73c9 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 508537f..97dd9bb 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index d290632..b679ea6 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index c1b1c80..191b682 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index b081cd4..f6bb75c 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index 49cf7b7..ce481da 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index c7d0e05..c33a852 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 199528f..a946c68 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index fd1fa27..188340a 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 18c59a8..1fb10cb 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 7965919..84e0518 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index d35d109..30c21e1 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 1f484c9..98ad125 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 6f12e314..916ae09 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 68bc227..ac1a599 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -998,7 +998,6 @@
 	if (!device)
 		return -EINVAL;
 	battery = acpi_driver_data(device);
-	acpi_battery_refresh(battery);
 	battery->update_time = 0;
 	acpi_battery_update(battery);
 	return 0;
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index 54b6ab8..fa5a1df 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
+#include <linux/acpi_io.h>
 #include <acpi/acpiosxf.h>
 
 /*
@@ -80,7 +81,7 @@
 			free_page((unsigned long)entry->data);
 			entry->data = NULL;
 			if (entry->kaddr) {
-				acpi_os_unmap_memory(entry->kaddr, entry->size);
+				iounmap(entry->kaddr);
 				entry->kaddr = NULL;
 			}
 		}
@@ -114,8 +115,8 @@
 
 	list_for_each_entry(entry, &nvs_list, node)
 		if (entry->data) {
-			entry->kaddr = acpi_os_map_memory(entry->phys_start,
-							  entry->size);
+			entry->kaddr = acpi_os_ioremap(entry->phys_start,
+						    entry->size);
 			if (!entry->kaddr) {
 				suspend_nvs_free();
 				return -ENOMEM;
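
The NVS code now maps the saved regions with acpi_os_ioremap() (declared in the linux/acpi_io.h header added at the top of the file) and tears them down with a plain iounmap(), so it no longer goes through the ACPICA acpi_os_map_memory()/acpi_os_unmap_memory() cache that the osl.c changes below rework. The pairing, condensed:

	/* save path: map each region that has to be preserved */
	entry->kaddr = acpi_os_ioremap(entry->phys_start, entry->size);
	if (!entry->kaddr) {
		suspend_nvs_free();		/* undo any partial work */
		return -ENOMEM;
	}

	/* presumably suspend_nvs_free(): drop the mapping again */
	iounmap(entry->kaddr);
	entry->kaddr = NULL;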
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index e2dd6de..c90c76a 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -38,6 +38,7 @@
 #include <linux/workqueue.h>
 #include <linux/nmi.h>
 #include <linux/acpi.h>
+#include <linux/acpi_io.h>
 #include <linux/efi.h>
 #include <linux/ioport.h>
 #include <linux/list.h>
@@ -302,9 +303,10 @@
 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
 	struct acpi_ioremap *map, *tmp_map;
-	unsigned long flags, pg_sz;
+	unsigned long flags;
 	void __iomem *virt;
-	phys_addr_t pg_off;
+	acpi_physical_address pg_off;
+	acpi_size pg_sz;
 
 	if (phys > ULONG_MAX) {
 		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
@@ -320,7 +322,7 @@
 
 	pg_off = round_down(phys, PAGE_SIZE);
 	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
-	virt = ioremap_cache(pg_off, pg_sz);
+	virt = acpi_os_ioremap(pg_off, pg_sz);
 	if (!virt) {
 		kfree(map);
 		return NULL;
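
The hunk above swaps the raw ioremap_cache() for the overridable acpi_os_ioremap() helper and switches pg_off/pg_sz to ACPI-sized types; the mapping itself stays page-granular, and the caller-visible address is assumed to be the page mapping plus the offset of phys inside its first page. Condensed:

	pg_off = round_down(phys, PAGE_SIZE);			/* page boundary at or below phys */
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;	/* whole pages covering [phys, phys + size) */

	virt = acpi_os_ioremap(pg_off, pg_sz);
	if (!virt) {
		kfree(map);					/* tracking struct allocated earlier */
		return NULL;
	}
	/* caller gets virt + (phys - pg_off) */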
@@ -634,17 +636,21 @@
 acpi_status
 acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
 {
-	u32 dummy;
 	void __iomem *virt_addr;
-	int size = width / 8, unmap = 0;
+	unsigned int size = width / 8;
+	bool unmap = false;
+	u32 dummy;
 
 	rcu_read_lock();
 	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
-	rcu_read_unlock();
 	if (!virt_addr) {
-		virt_addr = ioremap_cache(phys_addr, size);
-		unmap = 1;
+		rcu_read_unlock();
+		virt_addr = acpi_os_ioremap(phys_addr, size);
+		if (!virt_addr)
+			return AE_BAD_ADDRESS;
+		unmap = true;
 	}
+
 	if (!value)
 		value = &dummy;
 
@@ -664,6 +670,8 @@
 
 	if (unmap)
 		iounmap(virt_addr);
+	else
+		rcu_read_unlock();
 
 	return AE_OK;
 }
@@ -672,14 +680,17 @@
 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
 {
 	void __iomem *virt_addr;
-	int size = width / 8, unmap = 0;
+	unsigned int size = width / 8;
+	bool unmap = false;
 
 	rcu_read_lock();
 	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
-	rcu_read_unlock();
 	if (!virt_addr) {
-		virt_addr = ioremap_cache(phys_addr, size);
-		unmap = 1;
+		rcu_read_unlock();
+		virt_addr = acpi_os_ioremap(phys_addr, size);
+		if (!virt_addr)
+			return AE_BAD_ADDRESS;
+		unmap = true;
 	}
 
 	switch (width) {
@@ -698,6 +709,8 @@
 
 	if (unmap)
 		iounmap(virt_addr);
+	else
+		rcu_read_unlock();
 
 	return AE_OK;
 }
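
The two hunks above change the locking discipline in acpi_os_read_memory()/acpi_os_write_memory(): the RCU read lock is now held for the whole access when a cached mapping from acpi_map_vaddr_lookup() is used, and is dropped early only when a transient acpi_os_ioremap() has to be made (whose failure can now be reported as AE_BAD_ADDRESS instead of dereferencing NULL). The common shape, condensed:

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();		/* nothing cached, nothing to protect */
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	/* ... readb()/readw()/readl() or the write equivalents on virt_addr ... */

	if (unmap)
		iounmap(virt_addr);		/* transient mapping */
	else
		rcu_read_unlock();		/* cached mapping stays; drop the lock now */

	return AE_OK;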
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index fdd3aee..d6a8cd1 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -166,6 +166,7 @@
 	u32 acpi_state = acpi_target_sleep_state;
 
 	acpi_ec_unblock_transactions();
+	suspend_nvs_free();
 
 	if (acpi_state == ACPI_STATE_S0)
 		return;
@@ -186,7 +187,6 @@
  */
 static void acpi_pm_end(void)
 {
-	suspend_nvs_free();
 	/*
 	 * This is necessary in case acpi_pm_finish() is not called during a
 	 * failing transition to a sleep state.
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 42d3d72..5af3479 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -82,6 +82,11 @@
 	if (!device)
 		return 0;
 
+	/* Is this device able to support video switching ? */
+	if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) ||
+	    ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
+		video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
+
 	/* Is this device able to retrieve a video ROM ? */
 	if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy)))
 		video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
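
The new check treats the presence of either _DOD (enumerate the outputs attached to the adapter) or _DOS (set the output switching policy) as evidence that the device supports output switching, mirroring the existing _ROM probe below it; acpi_get_handle() is used purely as a does-this-method-exist test:

	/* h_dummy and video_caps are the locals already used by the surrounding code */
	if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) ||
	    ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
		video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;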
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index ed65014..7bfbe40 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -86,8 +86,12 @@
 		struct acpi_device *dev = container_of(node,
 						       struct acpi_device,
 						       wakeup_list);
-		if (device_can_wakeup(&dev->dev))
+		if (device_can_wakeup(&dev->dev)) {
+			/* Button GPEs are supposed to be always enabled. */
+			acpi_enable_gpe(dev->wakeup.gpe_device,
+					dev->wakeup.gpe_number);
 			device_set_wakeup_enable(&dev->dev, true);
+		}
 	}
 	mutex_unlock(&acpi_device_lock);
 	return 0;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index c6b298d..c2328ae 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -783,7 +783,7 @@
 
 config PATA_PLATFORM
 	tristate "Generic platform device PATA support"
-	depends on EMBEDDED || PPC || HAVE_PATA_PLATFORM
+	depends on EXPERT || PPC || HAVE_PATA_PLATFORM
 	help
 	  This option enables support for generic directly connected ATA
 	  devices commonly found on embedded systems.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 3288263..b8d96ce 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -260,6 +260,7 @@
 	{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
 	{ PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
 	{ PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
+	{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -379,6 +380,8 @@
 	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv },	/* 6145 */
 	{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv },	/* 6121 */
 	{ PCI_DEVICE(0x1b4b, 0x9123),
+	  .class = PCI_CLASS_STORAGE_SATA_AHCI,
+	  .class_mask = 0xffffff,
 	  .driver_data = board_ahci_yes_fbs },			/* 88se9128 */
 
 	/* Promise */
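
Adding .class/.class_mask to the Marvell entry means the match now also requires the PCI class code to be 0x010601 (SATA controller, AHCI 1.0 programming interface), so ahci only binds to the 88se9128 function when it is actually configured in AHCI mode rather than on vendor/device ID alone. The shape of such a class-qualified table entry:

	static const struct pci_device_id example_id = {
		PCI_DEVICE(0x1b4b, 0x9123),		/* vendor/device pair */
		.class = PCI_CLASS_STORAGE_SATA_AHCI,	/* base class/subclass/prog-if = 01/06/01 */
		.class_mask = 0xffffff,			/* compare all 24 class bits */
		.driver_data = board_ahci_yes_fbs,	/* same board type as the entry above */
	};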
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index a31fe96..d4e52e2 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4138,6 +4138,7 @@
 	 * device and controller are SATA.
 	 */
 	{ "PIONEER DVD-RW  DVRTD08",	"1.00",	ATA_HORKAGE_NOSETXFER },
+	{ "PIONEER DVD-RW  DVR-212D",	"1.28", ATA_HORKAGE_NOSETXFER },
 
 	/* End Marker */
 	{ }
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 5defc74..600f635 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1099,9 +1099,9 @@
 		struct request_queue *q = sdev->request_queue;
 		void *buf;
 
-		/* set the min alignment and padding */
-		blk_queue_update_dma_alignment(sdev->request_queue,
-					       ATA_DMA_PAD_SZ - 1);
+		sdev->sector_size = ATA_SECT_SIZE;
+
+		/* set DMA padding */
 		blk_queue_update_dma_pad(sdev->request_queue,
 					 ATA_DMA_PAD_SZ - 1);
 
@@ -1115,13 +1115,25 @@
 
 		blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN);
 	} else {
-		/* ATA devices must be sector aligned */
 		sdev->sector_size = ata_id_logical_sector_size(dev->id);
-		blk_queue_update_dma_alignment(sdev->request_queue,
-					       sdev->sector_size - 1);
 		sdev->manage_start_stop = 1;
 	}
 
+	/*
+	 * ata_pio_sectors() expects buffer for each sector to not cross
+	 * page boundary.  Enforce it by requiring buffers to be sector
+	 * aligned, which works iff sector_size is not larger than
+	 * PAGE_SIZE.  ATAPI devices also need the alignment as
+	 * IDENTIFY_PACKET is executed as ATA_PROT_PIO.
+	 */
+	if (sdev->sector_size > PAGE_SIZE)
+		ata_dev_printk(dev, KERN_WARNING,
+			"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
+			sdev->sector_size);
+
+	blk_queue_update_dma_alignment(sdev->request_queue,
+				       sdev->sector_size - 1);
+
 	if (dev->flags & ATA_DFLAG_AN)
 		set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events);
 
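
The new comment in this hunk leans on a small arithmetic fact: if every buffer is aligned to sector_size, and sector_size is a power of two no larger than PAGE_SIZE, then no sector-sized chunk can straddle a page boundary. A quick self-checking sketch, with PAGE_SIZE fixed at 4096 purely for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u   /* illustrative value; the real one is arch-dependent */

/* Does the sector starting at 'off' cross a page boundary? */
static int crosses_page(uint64_t off, unsigned int sector_size)
{
	return (off / PAGE_SIZE) != ((off + sector_size - 1) / PAGE_SIZE);
}

int main(void)
{
	unsigned int sector_size = 512;   /* power of two, <= PAGE_SIZE */
	uint64_t off;

	/* With sector-aligned offsets no sector ever straddles a page... */
	for (off = 0; off < 4 * PAGE_SIZE; off += sector_size)
		assert(!crosses_page(off, sector_size));

	/* ...whereas a merely word-aligned offset can. */
	printf("misaligned example crosses page: %d\n",
	       crosses_page(PAGE_SIZE - 256, sector_size));
	return 0;
}
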
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index d7e57db..538ec38 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt366"
-#define DRV_VERSION	"0.6.9"
+#define DRV_VERSION	"0.6.10"
 
 struct hpt_clock {
 	u8	xfer_mode;
@@ -160,8 +160,8 @@
 
 	while (list[i] != NULL) {
 		if (!strcmp(list[i], model_num)) {
-			printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
-				modestr, list[i]);
+			pr_warning(DRV_NAME ": %s is not supported for %s.\n",
+				   modestr, list[i]);
 			return 1;
 		}
 		i++;
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index efdd18b..4c5b518 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -24,7 +24,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt37x"
-#define DRV_VERSION	"0.6.18"
+#define DRV_VERSION	"0.6.22"
 
 struct hpt_clock {
 	u8	xfer_speed;
@@ -229,8 +229,8 @@
 
 	while (list[i] != NULL) {
 		if (!strcmp(list[i], model_num)) {
-			printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
-				modestr, list[i]);
+			pr_warning(DRV_NAME ": %s is not supported for %s.\n",
+				   modestr, list[i]);
 			return 1;
 		}
 		i++;
@@ -642,7 +642,6 @@
 static struct ata_port_operations hpt374_fn1_port_ops = {
 	.inherits	= &hpt372_port_ops,
 	.cable_detect	= hpt374_fn1_cable_detect,
-	.prereset	= hpt37x_pre_reset,
 };
 
 /**
@@ -803,7 +802,7 @@
 		.udma_mask = ATA_UDMA6,
 		.port_ops = &hpt302_port_ops
 	};
-	/* HPT374 - UDMA100, function 1 uses different prereset method */
+	/* HPT374 - UDMA100, function 1 uses different cable_detect method */
 	static const struct ata_port_info info_hpt374_fn0 = {
 		.flags = ATA_FLAG_SLAVE_POSS,
 		.pio_mask = ATA_PIO4,
@@ -838,7 +837,8 @@
 	if (rc)
 		return rc;
 
-	if (dev->device == PCI_DEVICE_ID_TTI_HPT366) {
+	switch (dev->device) {
+	case PCI_DEVICE_ID_TTI_HPT366:
 		/* May be a later chip in disguise. Check */
 		/* Older chips are in the HPT366 driver. Ignore them */
 		if (rev < 3)
@@ -863,54 +863,50 @@
 			chip_table = &hpt372;
 			break;
 		default:
-			printk(KERN_ERR "pata_hpt37x: Unknown HPT366 subtype, "
+			pr_err(DRV_NAME ": Unknown HPT366 subtype, "
 			       "please report (%d).\n", rev);
 			return -ENODEV;
 		}
-	} else {
-		switch (dev->device) {
-		case PCI_DEVICE_ID_TTI_HPT372:
-			/* 372N if rev >= 2 */
-			if (rev >= 2)
-				return -ENODEV;
-			ppi[0] = &info_hpt372;
-			chip_table = &hpt372a;
-			break;
-		case PCI_DEVICE_ID_TTI_HPT302:
-			/* 302N if rev > 1 */
-			if (rev > 1)
-				return -ENODEV;
-			ppi[0] = &info_hpt302;
-			/* Check this */
-			chip_table = &hpt302;
-			break;
-		case PCI_DEVICE_ID_TTI_HPT371:
-			if (rev > 1)
-				return -ENODEV;
-			ppi[0] = &info_hpt302;
-			chip_table = &hpt371;
-			/*
-			 * Single channel device, master is not present
-			 * but the BIOS (or us for non x86) must mark it
-			 * absent
-			 */
-			pci_read_config_byte(dev, 0x50, &mcr1);
-			mcr1 &= ~0x04;
-			pci_write_config_byte(dev, 0x50, mcr1);
-			break;
-		case PCI_DEVICE_ID_TTI_HPT374:
-			chip_table = &hpt374;
-			if (!(PCI_FUNC(dev->devfn) & 1))
-				*ppi = &info_hpt374_fn0;
-			else
-				*ppi = &info_hpt374_fn1;
-			break;
-		default:
-			printk(KERN_ERR
-			       "pata_hpt37x: PCI table is bogus, please report (%d).\n",
-			       dev->device);
-				return -ENODEV;
-		}
+		break;
+	case PCI_DEVICE_ID_TTI_HPT372:
+		/* 372N if rev >= 2 */
+		if (rev >= 2)
+			return -ENODEV;
+		ppi[0] = &info_hpt372;
+		chip_table = &hpt372a;
+		break;
+	case PCI_DEVICE_ID_TTI_HPT302:
+		/* 302N if rev > 1 */
+		if (rev > 1)
+			return -ENODEV;
+		ppi[0] = &info_hpt302;
+		/* Check this */
+		chip_table = &hpt302;
+		break;
+	case PCI_DEVICE_ID_TTI_HPT371:
+		if (rev > 1)
+			return -ENODEV;
+		ppi[0] = &info_hpt302;
+		chip_table = &hpt371;
+		/*
+		 * Single channel device, master is not present but the BIOS
+		 * (or us for non x86) must mark it absent
+		 */
+		pci_read_config_byte(dev, 0x50, &mcr1);
+		mcr1 &= ~0x04;
+		pci_write_config_byte(dev, 0x50, mcr1);
+		break;
+	case PCI_DEVICE_ID_TTI_HPT374:
+		chip_table = &hpt374;
+		if (!(PCI_FUNC(dev->devfn) & 1))
+			*ppi = &info_hpt374_fn0;
+		else
+			*ppi = &info_hpt374_fn1;
+		break;
+	default:
+		pr_err(DRV_NAME ": PCI table is bogus, please report (%d).\n",
+		       dev->device);
+		return -ENODEV;
 	}
 	/* Ok so this is a chip we support */
 
@@ -957,8 +953,7 @@
 		u8 sr;
 		u32 total = 0;
 
-		printk(KERN_WARNING
-		       "pata_hpt37x: BIOS has not set timing clocks.\n");
+		pr_warning(DRV_NAME ": BIOS has not set timing clocks.\n");
 
 		/* This is the process the HPT371 BIOS is reported to use */
 		for (i = 0; i < 128; i++) {
@@ -1014,7 +1009,7 @@
 					       (f_high << 16) | f_low | 0x100);
 		}
 		if (adjust == 8) {
-			printk(KERN_ERR "pata_hpt37x: DPLL did not stabilize!\n");
+			pr_err(DRV_NAME ": DPLL did not stabilize!\n");
 			return -ENODEV;
 		}
 		if (dpll == 3)
@@ -1022,8 +1017,8 @@
 		else
 			private_data = (void *)hpt37x_timings_50;
 
-		printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using %dMHz DPLL.\n",
-		       MHz[clock_slot], MHz[dpll]);
+		pr_info(DRV_NAME ": bus clock %dMHz, using %dMHz DPLL.\n",
+			MHz[clock_slot], MHz[dpll]);
 	} else {
 		private_data = (void *)chip_table->clocks[clock_slot];
 		/*
@@ -1036,8 +1031,9 @@
 			ppi[0] = &info_hpt370_33;
 		if (clock_slot < 2 && ppi[0] == &info_hpt370a)
 			ppi[0] = &info_hpt370a_33;
-		printk(KERN_INFO "pata_hpt37x: %s using %dMHz bus clock.\n",
-		       chip_table->name, MHz[clock_slot]);
+
+		pr_info(DRV_NAME ": %s using %dMHz bus clock.\n",
+			chip_table->name, MHz[clock_slot]);
 	}
 
 	/* Now kick off ATA set up */
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index d2239bb..eca68ca 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -25,7 +25,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"pata_hpt3x2n"
-#define DRV_VERSION	"0.3.13"
+#define DRV_VERSION	"0.3.14"
 
 enum {
 	HPT_PCI_FAST	=	(1 << 31),
@@ -418,7 +418,7 @@
 		u16 sr;
 		u32 total = 0;
 
-		printk(KERN_WARNING "pata_hpt3x2n: BIOS clock data not set.\n");
+		pr_warning(DRV_NAME ": BIOS clock data not set.\n");
 
 		/* This is the process the HPT371 BIOS is reported to use */
 		for (i = 0; i < 128; i++) {
@@ -528,8 +528,7 @@
 		ppi[0] = &info_hpt372n;
 		break;
 	default:
-		printk(KERN_ERR
-		       "pata_hpt3x2n: PCI table is bogus please report (%d).\n",
+		pr_err(DRV_NAME ": PCI table is bogus, please report (%d).\n",
 		       dev->device);
 		return -ENODEV;
 	}
@@ -579,12 +578,11 @@
 		pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
 	}
 	if (adjust == 8) {
-		printk(KERN_ERR "pata_hpt3x2n: DPLL did not stabilize!\n");
+		pr_err(DRV_NAME ": DPLL did not stabilize!\n");
 		return -ENODEV;
 	}
 
-	printk(KERN_INFO "pata_hpt37x: bus clock %dMHz, using 66MHz DPLL.\n",
-	       pci_mhz);
+	pr_info(DRV_NAME ": bus clock %dMHz, using 66MHz DPLL.\n", pci_mhz);
 
 	/*
 	 * Set our private data up. We only need a few flags
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 8cc536e..d7d8026 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -610,7 +610,7 @@
 };
 
 static struct ata_port_operations mpc52xx_ata_port_ops = {
-	.inherits		= &ata_sff_port_ops,
+	.inherits		= &ata_bmdma_port_ops,
 	.sff_dev_select		= mpc52xx_ata_dev_select,
 	.set_piomode		= mpc52xx_ata_set_piomode,
 	.set_dmamode		= mpc52xx_ata_set_dmamode,
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c
index bca9cb8..487a547 100644
--- a/drivers/atm/idt77105.c
+++ b/drivers/atm/idt77105.c
@@ -151,7 +151,7 @@
 	spin_unlock_irqrestore(&idt77105_priv_lock, flags);
 	if (arg == NULL)
 		return 0;
-	return copy_to_user(arg, &PRIV(dev)->stats,
+	return copy_to_user(arg, &stats,
 		    sizeof(struct idt77105_stats)) ? -EFAULT : 0;
 }
 
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 73fb1c4..25ef1a4 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -866,8 +866,9 @@
 	}
 
 	skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
-	if (!skb && net_ratelimit()) {
-		dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
+	if (!skb) {
+		if (net_ratelimit())
+			dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
 		return -ENOMEM;
 	}
 	header = (void *)skb_put(skb, sizeof(*header));
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fd96345..d57e8d0 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -70,7 +70,7 @@
 	  If unsure say Y here.
 
 config FW_LOADER
-	tristate "Userspace firmware loading support" if EMBEDDED
+	tristate "Userspace firmware loading support" if EXPERT
 	default y
 	---help---
 	  This option is provided for the case where no in-kernel-tree modules
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 656493a..42615b4 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -407,12 +407,15 @@
 		goto out;
 	}
 
+	/* Maybe the parent is now able to suspend. */
 	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
-		spin_unlock_irq(&dev->power.lock);
+		spin_unlock(&dev->power.lock);
 
-		pm_request_idle(parent);
+		spin_lock(&parent->power.lock);
+		rpm_idle(parent, RPM_ASYNC);
+		spin_unlock(&parent->power.lock);
 
-		spin_lock_irq(&dev->power.lock);
+		spin_lock(&dev->power.lock);
 	}
 
  out:
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index d7f463d..40528ba 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -39,4 +39,4 @@
 obj-$(CONFIG_BLK_DEV_DRBD)     += drbd/
 obj-$(CONFIG_BLK_DEV_RBD)     += rbd.o
 
-swim_mod-objs	:= swim.o swim_asm.o
+swim_mod-y	:= swim.o swim_asm.o
diff --git a/drivers/block/aoe/Makefile b/drivers/block/aoe/Makefile
index e76d997..06ea82c 100644
--- a/drivers/block/aoe/Makefile
+++ b/drivers/block/aoe/Makefile
@@ -3,4 +3,4 @@
 #
 
 obj-$(CONFIG_ATA_OVER_ETH)	+= aoe.o
-aoe-objs := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
+aoe-y := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 516d5bb..9279272 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2833,7 +2833,7 @@
 	sector_t total_size;
 	InquiryData_struct *inq_buff = NULL;
 
-	for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
+	for (logvol = 0; logvol <= h->highest_lun; logvol++) {
 		if (!h->drv[logvol])
 			continue;
 		if (memcmp(h->drv[logvol]->LunID, drv->LunID,
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 44e18c0..49e6a54 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1641,6 +1641,9 @@
 
 static void loop_free(struct loop_device *lo)
 {
+	if (!lo->lo_queue->queue_lock)
+		lo->lo_queue->queue_lock = &lo->lo_queue->__queue_lock;
+
 	blk_cleanup_queue(lo->lo_queue);
 	put_disk(lo->lo_disk);
 	list_del(&lo->lo_list);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index a32fb41..e6fc716 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -53,7 +53,6 @@
 #define DBG_BLKDEV      0x0100
 #define DBG_RX          0x0200
 #define DBG_TX          0x0400
-static DEFINE_MUTEX(nbd_mutex);
 static unsigned int debugflags;
 #endif /* NDEBUG */
 
@@ -718,11 +717,9 @@
 	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
 			lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
 
-	mutex_lock(&nbd_mutex);
 	mutex_lock(&lo->tx_lock);
 	error = __nbd_ioctl(bdev, lo, cmd, arg);
 	mutex_unlock(&lo->tx_lock);
-	mutex_unlock(&nbd_mutex);
 
 	return error;
 }
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 14033a3..e2c48a7 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -409,7 +409,8 @@
 	}
 
 	ENSURE(drive_status, CDC_DRIVE_STATUS );
-	ENSURE(media_changed, CDC_MEDIA_CHANGED);
+	if (cdo->check_events == NULL && cdo->media_changed == NULL)
+		*change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC);
 	ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
 	ENSURE(lock_door, CDC_LOCK);
 	ENSURE(select_speed, CDC_SELECT_SPEED);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 0f175a8..b7980a83 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -5,7 +5,7 @@
 menu "Character devices"
 
 config VT
-	bool "Virtual terminal" if EMBEDDED
+	bool "Virtual terminal" if EXPERT
 	depends on !S390
 	select INPUT
 	default y
@@ -39,13 +39,13 @@
 config CONSOLE_TRANSLATIONS
 	depends on VT
 	default y
-	bool "Enable character translations in console" if EMBEDDED
+	bool "Enable character translations in console" if EXPERT
 	---help---
 	  This enables support for font mapping and Unicode translation
 	  on virtual consoles.
 
 config VT_CONSOLE
-	bool "Support for console on virtual terminal" if EMBEDDED
+	bool "Support for console on virtual terminal" if EXPERT
 	depends on VT
 	default y
 	---help---
@@ -426,10 +426,10 @@
          If you have an SGI Altix with an attached SABrick
          say Y or M here, otherwise say N.
 
-source "drivers/serial/Kconfig"
+source "drivers/tty/serial/Kconfig"
 
 config UNIX98_PTYS
-	bool "Unix98 PTY support" if EMBEDDED
+	bool "Unix98 PTY support" if EXPERT
 	default y
 	---help---
 	  A pseudo terminal (PTY) is a software device consisting of two
@@ -495,7 +495,7 @@
 
 config TTY_PRINTK
 	bool "TTY driver to output user messages via printk"
-	depends on EMBEDDED
+	depends on EXPERT
 	default n
 	---help---
 	  If you say Y here, the support for writing user messages (i.e.
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 1e9dffb..8238f89 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -30,17 +30,6 @@
 obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
 obj-$(CONFIG_SX)		+= sx.o generic_serial.o
 obj-$(CONFIG_RIO)		+= rio/ generic_serial.o
-obj-$(CONFIG_HVC_CONSOLE)	+= hvc_vio.o hvsi.o
-obj-$(CONFIG_HVC_ISERIES)	+= hvc_iseries.o
-obj-$(CONFIG_HVC_RTAS)		+= hvc_rtas.o
-obj-$(CONFIG_HVC_TILE)		+= hvc_tile.o
-obj-$(CONFIG_HVC_DCC)		+= hvc_dcc.o
-obj-$(CONFIG_HVC_BEAT)		+= hvc_beat.o
-obj-$(CONFIG_HVC_DRIVER)	+= hvc_console.o
-obj-$(CONFIG_HVC_IRQ)		+= hvc_irq.o
-obj-$(CONFIG_HVC_XEN)		+= hvc_xen.o
-obj-$(CONFIG_HVC_IUCV)		+= hvc_iucv.o
-obj-$(CONFIG_HVC_UDBG)		+= hvc_udbg.o
 obj-$(CONFIG_VIRTIO_CONSOLE)	+= virtio_console.o
 obj-$(CONFIG_RAW_DRIVER)	+= raw.o
 obj-$(CONFIG_SGI_SNSC)		+= snsc.o snsc_event.o
@@ -48,7 +37,6 @@
 obj-$(CONFIG_MMTIMER)		+= mmtimer.o
 obj-$(CONFIG_UV_MMTIMER)	+= uv_mmtimer.o
 obj-$(CONFIG_VIOTAPE)		+= viotape.o
-obj-$(CONFIG_HVCS)		+= hvcs.o
 obj-$(CONFIG_IBM_BSR)		+= bsr.o
 obj-$(CONFIG_SGI_MBCS)		+= mbcs.o
 obj-$(CONFIG_BRIQ_PANEL)	+= briq_panel.o
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index fcd867d..d8b1b57 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -50,7 +50,7 @@
 
 config AGP_AMD
 	tristate "AMD Irongate, 761, and 762 chipset support"
-	depends on AGP && (X86_32 || ALPHA)
+	depends on AGP && X86_32
 	help
 	  This option gives you AGP support for the GLX component of
 	  X on AMD Irongate, 761, and 762 chipsets.
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index b1b4362..45681c0 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -41,22 +41,8 @@
 	if (page_map->real == NULL)
 		return -ENOMEM;
 
-#ifndef CONFIG_X86
-	SetPageReserved(virt_to_page(page_map->real));
-	global_cache_flush();
-	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
-					    PAGE_SIZE);
-	if (page_map->remapped == NULL) {
-		ClearPageReserved(virt_to_page(page_map->real));
-		free_page((unsigned long) page_map->real);
-		page_map->real = NULL;
-		return -ENOMEM;
-	}
-	global_cache_flush();
-#else
 	set_memory_uc((unsigned long)page_map->real, 1);
 	page_map->remapped = page_map->real;
-#endif
 
 	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++) {
 		writel(agp_bridge->scratch_page, page_map->remapped+i);
@@ -68,12 +54,7 @@
 
 static void amd_free_page_map(struct amd_page_map *page_map)
 {
-#ifndef CONFIG_X86
-	iounmap(page_map->remapped);
-	ClearPageReserved(virt_to_page(page_map->real));
-#else
 	set_memory_wb((unsigned long)page_map->real, 1);
-#endif
 	free_page((unsigned long) page_map->real);
 }
 
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 857df10..b0a0dcc 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -774,20 +774,14 @@
 	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
 
 	/*
-	* If the device has not been properly setup, the following will catch
-	* the problem and should stop the system from crashing.
-	* 20030610 - hamish@zot.org
-	*/
-	if (pci_enable_device(pdev)) {
-		dev_err(&pdev->dev, "can't enable PCI device\n");
-		agp_put_bridge(bridge);
-		return -ENODEV;
-	}
-
-	/*
 	* The following fixes the case where the BIOS has "forgotten" to
 	* provide an address range for the GART.
 	* 20030610 - hamish@zot.org
+	* This happens before pci_enable_device() intentionally;
+	* calling pci_enable_device() before assigning the resource
+	* will result in the GART being disabled on machines with such
+	* BIOSs (the GART ends up with a BAR starting at 0, which
+	* conflicts with a lot of other devices).
 	*/
 	r = &pdev->resource[0];
 	if (!r->start && r->end) {
@@ -798,6 +792,17 @@
 		}
 	}
 
+	/*
+	* If the device has not been properly setup, the following will catch
+	* the problem and should stop the system from crashing.
+	* 20030610 - hamish@zot.org
+	*/
+	if (pci_enable_device(pdev)) {
+		dev_err(&pdev->dev, "can't enable PCI device\n");
+		agp_put_bridge(bridge);
+		return -ENODEV;
+	}
+
 	/* Fill in the mode register */
 	if (cap_ptr) {
 		pci_read_config_dword(pdev,
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 826ab09..fab3d32 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -68,6 +68,7 @@
 	phys_addr_t gma_bus_addr;
 	u32 PGETBL_save;
 	u32 __iomem *gtt;		/* I915G */
+	bool clear_fake_agp; /* on first access via agp, fill with scratch */
 	int num_dcache_entries;
 	union {
 		void __iomem *i9xx_flush_page;
@@ -869,21 +870,12 @@
 
 static int intel_fake_agp_configure(void)
 {
-	int i;
-
 	if (!intel_enable_gtt())
 	    return -EIO;
 
+	intel_private.clear_fake_agp = true;
 	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
 
-	for (i = 0; i < intel_private.base.gtt_total_entries; i++) {
-		intel_private.driver->write_entry(intel_private.scratch_page_dma,
-						  i, 0);
-	}
-	readl(intel_private.gtt+i-1);	/* PCI Posting. */
-
-	global_cache_flush();
-
 	return 0;
 }
 
@@ -945,6 +937,13 @@
 {
 	int ret = -EINVAL;
 
+	if (intel_private.clear_fake_agp) {
+		int start = intel_private.base.stolen_size / PAGE_SIZE;
+		int end = intel_private.base.gtt_mappable_entries;
+		intel_gtt_clear_range(start, end - start);
+		intel_private.clear_fake_agp = false;
+	}
+
 	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
 		return i810_insert_dcache_entries(mem, pg_start, type);
 
diff --git a/drivers/char/bfin_jtag_comm.c b/drivers/char/bfin_jtag_comm.c
index e397df3..1640244 100644
--- a/drivers/char/bfin_jtag_comm.c
+++ b/drivers/char/bfin_jtag_comm.c
@@ -183,16 +183,16 @@
 }
 
 #ifndef CONFIG_BFIN_JTAG_COMM_CONSOLE
-# define acquire_console_sem()
-# define release_console_sem()
+# define console_lock()
+# define console_unlock()
 #endif
 static int
 bfin_jc_write(struct tty_struct *tty, const unsigned char *buf, int count)
 {
 	int i;
-	acquire_console_sem();
+	console_lock();
 	i = bfin_jc_circ_write(buf, count);
-	release_console_sem();
+	console_unlock();
 	wake_up_process(bfin_jc_kthread);
 	return i;
 }
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index b6ae6e9..7855f9f 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -320,6 +320,7 @@
 static int add_smi(struct smi_info *smi);
 static int try_smi_init(struct smi_info *smi);
 static void cleanup_one_si(struct smi_info *to_clean);
+static void cleanup_ipmi_si(void);
 
 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
 static int register_xaction_notifier(struct notifier_block *nb)
@@ -3450,16 +3451,7 @@
 	mutex_lock(&smi_infos_lock);
 	if (unload_when_empty && list_empty(&smi_infos)) {
 		mutex_unlock(&smi_infos_lock);
-#ifdef CONFIG_PCI
-		if (pci_registered)
-			pci_unregister_driver(&ipmi_pci_driver);
-#endif
-
-#ifdef CONFIG_PPC_OF
-		if (of_registered)
-			of_unregister_platform_driver(&ipmi_of_platform_driver);
-#endif
-		driver_unregister(&ipmi_driver.driver);
+		cleanup_ipmi_si();
 		printk(KERN_WARNING PFX
 		       "Unable to find any System Interface(s)\n");
 		return -ENODEV;
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 1f46f1c..faf5a2c 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -364,12 +364,14 @@
 		    tpm_protected_ordinal_duration[ordinal &
 						   TPM_PROTECTED_ORDINAL_MASK];
 
-	if (duration_idx != TPM_UNDEFINED)
+	if (duration_idx != TPM_UNDEFINED) {
 		duration = chip->vendor.duration[duration_idx];
-	if (duration <= 0)
+		/* if duration is 0, it's because chip->vendor.duration wasn't */
+		/* filled yet, so we set the lowest timeout just to give enough */
+		/* time for tpm_get_timeouts() to succeed */
+		return (duration <= 0 ? HZ : duration);
+	} else
 		return 2 * 60 * HZ;
-	else
-		return duration;
 }
 EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
 
@@ -575,9 +577,11 @@
 	if (rc)
 		return;
 
-	if (be32_to_cpu(tpm_cmd.header.out.return_code)
-	    != 3 * sizeof(u32))
+	if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
+	    be32_to_cpu(tpm_cmd.header.out.length)
+	    != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
 		return;
+
 	duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
 	chip->vendor.duration[TPM_SHORT] =
 	    usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
@@ -937,6 +941,18 @@
 }
 EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
 
+ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct tpm_chip *chip = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%d %d %d\n",
+	               jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
+	               jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
+	               jiffies_to_usecs(chip->vendor.duration[TPM_LONG]));
+}
+EXPORT_SYMBOL_GPL(tpm_show_timeouts);
+
 ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 72ddb03..d84ff77 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -56,6 +56,8 @@
 				char *);
 extern ssize_t tpm_show_temp_deactivated(struct device *,
 					 struct device_attribute *attr, char *);
+extern ssize_t tpm_show_timeouts(struct device *,
+				 struct device_attribute *attr, char *);
 
 struct tpm_chip;
 
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index c17a305..0d1d38e 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -376,6 +376,7 @@
 		   NULL);
 static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
 static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
 
 static struct attribute *tis_attrs[] = {
 	&dev_attr_pubek.attr,
@@ -385,7 +386,8 @@
 	&dev_attr_owned.attr,
 	&dev_attr_temp_deactivated.attr,
 	&dev_attr_caps.attr,
-	&dev_attr_cancel.attr, NULL,
+	&dev_attr_cancel.attr,
+	&dev_attr_timeouts.attr, NULL,
 };
 
 static struct attribute_group tis_attr_grp = {
@@ -493,9 +495,6 @@
 		 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
 		 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
 
-	if (is_itpm(to_pnp_dev(dev)))
-		itpm = 1;
-
 	if (itpm)
 		dev_info(dev, "Intel iTPM workaround enabled\n");
 
@@ -637,6 +636,9 @@
 	else
 		interrupts = 0;
 
+	if (is_itpm(pnp_dev))
+		itpm = 1;
+
 	return tpm_tis_init(&pnp_dev->dev, start, len, irq);
 }
 
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 896a2ce..4903931 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) 2006, 2007, 2009 Rusty Russell, IBM Corporation
- * Copyright (C) 2009, 2010 Red Hat, Inc.
+ * Copyright (C) 2009, 2010, 2011 Red Hat, Inc.
+ * Copyright (C) 2009, 2010, 2011 Amit Shah <amit.shah@redhat.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -31,7 +32,7 @@
 #include <linux/virtio_console.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
-#include "hvc_console.h"
+#include "../tty/hvc/hvc_console.h"
 
 /*
  * This is a global struct for storing common data for all the devices
@@ -1462,6 +1463,17 @@
 	spin_unlock(&portdev->cvq_lock);
 }
 
+static void out_intr(struct virtqueue *vq)
+{
+	struct port *port;
+
+	port = find_port_by_vq(vq->vdev->priv, vq);
+	if (!port)
+		return;
+
+	wake_up_interruptible(&port->waitqueue);
+}
+
 static void in_intr(struct virtqueue *vq)
 {
 	struct port *port;
@@ -1566,7 +1578,7 @@
 	 */
 	j = 0;
 	io_callbacks[j] = in_intr;
-	io_callbacks[j + 1] = NULL;
+	io_callbacks[j + 1] = out_intr;
 	io_names[j] = "input";
 	io_names[j + 1] = "output";
 	j += 2;
@@ -1580,7 +1592,7 @@
 		for (i = 1; i < nr_ports; i++) {
 			j += 2;
 			io_callbacks[j] = in_intr;
-			io_callbacks[j + 1] = NULL;
+			io_callbacks[j + 1] = out_intr;
 			io_names[j] = "input";
 			io_names[j + 1] = "output";
 		}
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index cfb0f52..effe797 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -202,17 +202,21 @@
 			printk(KERN_INFO "PM-Timer had inconsistent results:"
 			       " 0x%#llx, 0x%#llx - aborting.\n",
 			       value1, value2);
+			pmtmr_ioport = 0;
 			return -EINVAL;
 		}
 		if (i == ACPI_PM_READ_CHECKS) {
 			printk(KERN_INFO "PM-Timer failed consistency check "
 			       " (0x%#llx) - aborting.\n", value1);
+			pmtmr_ioport = 0;
 			return -ENODEV;
 		}
 	}
 
-	if (verify_pmtmr_rate() != 0)
+	if (verify_pmtmr_rate() != 0) {
+		pmtmr_ioport = 0;
 		return -ENODEV;
+	}
 
 	return clocksource_register_hz(&clocksource_acpi_pm,
 						PMTMR_TICKS_PER_SEC);
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 01b886e..79c47e8 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -196,9 +196,9 @@
 	clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
 	clkevt.clkevt.cpumask = cpumask_of(0);
 
-	setup_irq(irq, &tc_irqaction);
-
 	clockevents_register_device(&clkevt.clkevt);
+
+	setup_irq(irq, &tc_irqaction);
 }
 
 #else /* !CONFIG_GENERIC_CLOCKEVENTS */
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a8c8d9c..ca8ee80 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -71,7 +71,7 @@
 
 config CPU_FREQ_DEFAULT_GOV_POWERSAVE
 	bool "powersave"
-	depends on EMBEDDED
+	depends on EXPERT
 	select CPU_FREQ_GOV_POWERSAVE
 	help
 	  Use the CPUFreq governor 'powersave' as default. This sets
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 297f48b..07bca49 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -79,6 +79,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 #include <linux/dmapool.h>
 #include <linux/dmaengine.h>
 #include <linux/amba/bus.h>
@@ -235,16 +236,19 @@
 }
 
 /*
- * Overall DMAC remains enabled always.
+ * Pause the channel by setting the HALT bit.
  *
- * Disabling individual channels could lose data.
+ * For M->P transfers, pause the DMAC first and then stop the peripheral -
+ * the FIFO can only drain if the peripheral is still requesting data.
+ * (note: this can still time out if the DMAC FIFO never drains of data.)
  *
- * Disable the peripheral DMA after disabling the DMAC in order to allow
- * the DMAC FIFO to drain, and hence allow the channel to show inactive
+ * For P->M transfers, disable the peripheral first to stop it filling
+ * the DMAC FIFO, and then pause the DMAC.
  */
 static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
 {
 	u32 val;
+	int timeout;
 
 	/* Set the HALT bit and wait for the FIFO to drain */
 	val = readl(ch->base + PL080_CH_CONFIG);
@@ -252,8 +256,13 @@
 	writel(val, ch->base + PL080_CH_CONFIG);
 
 	/* Wait for channel inactive */
-	while (pl08x_phy_channel_busy(ch))
-		cpu_relax();
+	for (timeout = 1000; timeout; timeout--) {
+		if (!pl08x_phy_channel_busy(ch))
+			break;
+		udelay(1);
+	}
+	if (pl08x_phy_channel_busy(ch))
+		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
 }
 
 static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
@@ -267,19 +276,24 @@
 }
 
 
-/* Stops the channel */
-static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch)
+/*
+ * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
+ * clears any pending interrupt status.  This should not be used for
+ * an on-going transfer, but as a method of shutting down a channel
+ * (eg, when it's no longer used) or terminating a transfer.
+ */
+static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
+	struct pl08x_phy_chan *ch)
 {
-	u32 val;
+	u32 val = readl(ch->base + PL080_CH_CONFIG);
 
-	pl08x_pause_phy_chan(ch);
+	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
+	         PL080_CONFIG_TC_IRQ_MASK);
 
-	/* Disable channel */
-	val = readl(ch->base + PL080_CH_CONFIG);
-	val &= ~PL080_CONFIG_ENABLE;
-	val &= ~PL080_CONFIG_ERR_IRQ_MASK;
-	val &= ~PL080_CONFIG_TC_IRQ_MASK;
 	writel(val, ch->base + PL080_CH_CONFIG);
+
+	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
+	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
 }
 
 static inline u32 get_bytes_in_cctl(u32 cctl)
@@ -404,13 +418,12 @@
 {
 	unsigned long flags;
 
+	spin_lock_irqsave(&ch->lock, flags);
+
 	/* Stop the channel and clear its interrupts */
-	pl08x_stop_phy_chan(ch);
-	writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
-	writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
+	pl08x_terminate_phy_chan(pl08x, ch);
 
 	/* Mark it as free */
-	spin_lock_irqsave(&ch->lock, flags);
 	ch->serving = NULL;
 	spin_unlock_irqrestore(&ch->lock, flags);
 }
@@ -1449,7 +1462,7 @@
 		plchan->state = PL08X_CHAN_IDLE;
 
 		if (plchan->phychan) {
-			pl08x_stop_phy_chan(plchan->phychan);
+			pl08x_terminate_phy_chan(pl08x, plchan->phychan);
 
 			/*
 			 * Mark physical channel as free and free any slave
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index e53d438..e18eaab 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -49,6 +49,7 @@
 
 struct imxdma_engine {
 	struct device			*dev;
+	struct device_dma_parameters	dma_parms;
 	struct dma_device		dma_device;
 	struct imxdma_channel		channel[MAX_DMA_CHANNELS];
 };
@@ -242,6 +243,21 @@
 	else
 		dmamode = DMA_MODE_WRITE;
 
+	switch (imxdmac->word_size) {
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		if (sgl->length & 3 || sgl->dma_address & 3)
+			return NULL;
+		break;
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		if (sgl->length & 1 || sgl->dma_address & 1)
+			return NULL;
+		break;
+	case DMA_SLAVE_BUSWIDTH_1_BYTE:
+		break;
+	default:
+		return NULL;
+	}
+
 	ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
 		 dma_length, imxdmac->per_address, dmamode);
 	if (ret)
@@ -329,6 +345,9 @@
 
 	INIT_LIST_HEAD(&imxdma->dma_device.channels);
 
+	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
+
 	/* Initialize channel parameters */
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
 		struct imxdma_channel *imxdmac = &imxdma->channel[i];
@@ -346,11 +365,7 @@
 		imxdmac->imxdma = imxdma;
 		spin_lock_init(&imxdmac->lock);
 
-		dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
-		dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
-
 		imxdmac->chan.device = &imxdma->dma_device;
-		imxdmac->chan.chan_id = i;
 		imxdmac->channel = i;
 
 		/* Add the channel to the DMAC list */
@@ -370,6 +385,9 @@
 
 	platform_set_drvdata(pdev, imxdma);
 
+	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
+	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
+
 	ret = dma_async_device_register(&imxdma->dma_device);
 	if (ret) {
 		dev_err(&pdev->dev, "unable to register\n");
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d5a5d4d..b6d1455 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -230,7 +230,7 @@
  * struct sdma_channel - housekeeping for a SDMA channel
  *
  * @sdma		pointer to the SDMA engine for this channel
- * @channel		the channel number, matches dmaengine chan_id
+ * @channel		the channel number, matches dmaengine chan_id + 1
  * @direction		transfer type. Needed for setting SDMA script
  * @peripheral_type	Peripheral type. Needed for setting SDMA script
  * @event_id0		aka dma request line
@@ -301,6 +301,7 @@
 
 struct sdma_engine {
 	struct device			*dev;
+	struct device_dma_parameters	dma_parms;
 	struct sdma_channel		channel[MAX_DMA_CHANNELS];
 	struct sdma_channel_control	*channel_control;
 	void __iomem			*regs;
@@ -449,7 +450,7 @@
 		if (bd->mode.status & BD_RROR)
 			sdmac->status = DMA_ERROR;
 		else
-			sdmac->status = DMA_SUCCESS;
+			sdmac->status = DMA_IN_PROGRESS;
 
 		bd->mode.status |= BD_DONE;
 		sdmac->buf_tail++;
@@ -770,15 +771,15 @@
 	__raw_writel(1 << channel, sdma->regs + SDMA_H_START);
 }
 
-static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma)
+static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
 {
-	dma_cookie_t cookie = sdma->chan.cookie;
+	dma_cookie_t cookie = sdmac->chan.cookie;
 
 	if (++cookie < 0)
 		cookie = 1;
 
-	sdma->chan.cookie = cookie;
-	sdma->desc.cookie = cookie;
+	sdmac->chan.cookie = cookie;
+	sdmac->desc.cookie = cookie;
 
 	return cookie;
 }
@@ -798,7 +799,7 @@
 
 	cookie = sdma_assign_cookie(sdmac);
 
-	sdma_enable_channel(sdma, tx->chan->chan_id);
+	sdma_enable_channel(sdma, sdmac->channel);
 
 	spin_unlock_irq(&sdmac->lock);
 
@@ -811,10 +812,6 @@
 	struct imx_dma_data *data = chan->private;
 	int prio, ret;
 
-	/* No need to execute this for internal channel 0 */
-	if (chan->chan_id == 0)
-		return 0;
-
 	if (!data)
 		return -EINVAL;
 
@@ -879,7 +876,7 @@
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int ret, i, count;
-	int channel = chan->chan_id;
+	int channel = sdmac->channel;
 	struct scatterlist *sg;
 
 	if (sdmac->status == DMA_IN_PROGRESS)
@@ -924,22 +921,33 @@
 			ret =  -EINVAL;
 			goto err_out;
 		}
-		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
+
+		switch (sdmac->word_size) {
+		case DMA_SLAVE_BUSWIDTH_4_BYTES:
 			bd->mode.command = 0;
-		else
-			bd->mode.command = sdmac->word_size;
+			if (count & 3 || sg->dma_address & 3)
+				return NULL;
+			break;
+		case DMA_SLAVE_BUSWIDTH_2_BYTES:
+			bd->mode.command = 2;
+			if (count & 1 || sg->dma_address & 1)
+				return NULL;
+			break;
+		case DMA_SLAVE_BUSWIDTH_1_BYTE:
+			bd->mode.command = 1;
+			break;
+		default:
+			return NULL;
+		}
 
 		param = BD_DONE | BD_EXTD | BD_CONT;
 
-		if (sdmac->flags & IMX_DMA_SG_LOOP) {
+		if (i + 1 == sg_len) {
 			param |= BD_INTR;
-			if (i + 1 == sg_len)
-				param |= BD_WRAP;
+			param |= BD_LAST;
+			param &= ~BD_CONT;
 		}
 
-		if (i + 1 == sg_len)
-			param |= BD_INTR;
-
 		dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
 				i, count, sg->dma_address,
 				param & BD_WRAP ? "wrap" : "",
@@ -953,6 +961,7 @@
 
 	return &sdmac->desc;
 err_out:
+	sdmac->status = DMA_ERROR;
 	return NULL;
 }
 
@@ -963,7 +972,7 @@
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int num_periods = buf_len / period_len;
-	int channel = chan->chan_id;
+	int channel = sdmac->channel;
 	int ret, i = 0, buf = 0;
 
 	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
@@ -1066,14 +1075,12 @@
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	dma_cookie_t last_used;
-	enum dma_status ret;
 
 	last_used = chan->cookie;
 
-	ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used);
 	dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
 
-	return ret;
+	return sdmac->status;
 }
 
 static void sdma_issue_pending(struct dma_chan *chan)
@@ -1135,7 +1142,7 @@
 	/* download the RAM image for SDMA */
 	sdma_load_script(sdma, ram_code,
 			header->ram_code_size,
-			sdma->script_addrs->ram_code_start_addr);
+			addr->ram_code_start_addr);
 	clk_disable(sdma->clk);
 
 	sdma_add_scripts(sdma, addr);
@@ -1237,7 +1244,6 @@
 	struct resource *iores;
 	struct sdma_platform_data *pdata = pdev->dev.platform_data;
 	int i;
-	dma_cap_mask_t mask;
 	struct sdma_engine *sdma;
 
 	sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
@@ -1280,6 +1286,9 @@
 
 	sdma->version = pdata->sdma_version;
 
+	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
+	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+
 	INIT_LIST_HEAD(&sdma->dma_device.channels);
 	/* Initialize channel parameters */
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
@@ -1288,15 +1297,17 @@
 		sdmac->sdma = sdma;
 		spin_lock_init(&sdmac->lock);
 
-		dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
-		dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
-
 		sdmac->chan.device = &sdma->dma_device;
-		sdmac->chan.chan_id = i;
 		sdmac->channel = i;
 
-		/* Add the channel to the DMAC list */
-		list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels);
+		/*
+		 * Add the channel to the DMAC list. Do not add channel 0 though
+		 * because we need it internally in the SDMA driver. This also means
+		 * that channel 0 in dmaengine counting matches sdma channel 1.
+		 */
+		if (i)
+			list_add_tail(&sdmac->chan.device_node,
+					&sdma->dma_device.channels);
 	}
 
 	ret = sdma_init(sdma);
@@ -1317,6 +1328,8 @@
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
 	sdma->dma_device.device_control = sdma_control;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
+	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
+	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
 
 	ret = dma_async_device_register(&sdma->dma_device);
 	if (ret) {
@@ -1324,13 +1337,6 @@
 		goto err_init;
 	}
 
-	/* request channel 0. This is an internal control channel
-	 * to the SDMA engine and not available to clients.
-	 */
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-	dma_request_channel(mask, NULL, NULL);
-
 	dev_info(sdma->dev, "initialized\n");
 
 	return 0;
@@ -1348,7 +1354,7 @@
 err_request_region:
 err_irq:
 	kfree(sdma);
-	return 0;
+	return ret;
 }
 
 static int __exit sdma_remove(struct platform_device *pdev)
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index cb26ee9..c1a125e 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1145,29 +1145,6 @@
 	reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
 	idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
 
-	/*
-	 * Problem (observed with channel DMAIC_7): after enabling the channel
-	 * and initialising buffers, there comes an interrupt with current still
-	 * pointing at buffer 0, whereas it should use buffer 0 first and only
-	 * generate an interrupt when it is done, then current should already
-	 * point to buffer 1. This spurious interrupt also comes on channel
-	 * DMASDC_0. With DMAIC_7 normally, is we just leave the ISR after the
-	 * first interrupt, there comes the second with current correctly
-	 * pointing to buffer 1 this time. But sometimes this second interrupt
-	 * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling
-	 * the channel seems to prevent the channel from hanging, but it doesn't
-	 * prevent the spurious interrupt. This might also be unsafe. Think
-	 * about the IDMAC controller trying to switch to a buffer, when we
-	 * clear the ready bit, and re-enable it a moment later.
-	 */
-	reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY);
-	idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY);
-	idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY);
-
-	reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY);
-	idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY);
-	idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY);
-
 	spin_unlock_irqrestore(&ipu->lock, flags);
 
 	return 0;
@@ -1246,33 +1223,6 @@
 
 	/* Other interrupts do not interfere with this channel */
 	spin_lock(&ichan->lock);
-	if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
-		     ((curbuf >> chan_id) & 1) == ichan->active_buffer &&
-		     !list_is_last(ichan->queue.next, &ichan->queue))) {
-		int i = 100;
-
-		/* This doesn't help. See comment in ipu_disable_channel() */
-		while (--i) {
-			curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
-			if (((curbuf >> chan_id) & 1) != ichan->active_buffer)
-				break;
-			cpu_relax();
-		}
-
-		if (!i) {
-			spin_unlock(&ichan->lock);
-			dev_dbg(dev,
-				"IRQ on active buffer on channel %x, active "
-				"%d, ready %x, %x, current %x!\n", chan_id,
-				ichan->active_buffer, ready0, ready1, curbuf);
-			return IRQ_NONE;
-		} else
-			dev_dbg(dev,
-				"Buffer deactivated on channel %x, active "
-				"%d, ready %x, %x, current %x, rest %d!\n", chan_id,
-				ichan->active_buffer, ready0, ready1, curbuf, i);
-	}
-
 	if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
 		     (!ichan->active_buffer && (ready0 >> chan_id) & 1)
 		     )) {
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 4a5ecc5..23e0355 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -826,8 +826,6 @@
 /* Display and decode various NB registers for debug purposes. */
 static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 {
-	int ganged;
-
 	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
 
 	debugf1("  NB two channel DRAM capable: %s\n",
@@ -851,28 +849,19 @@
 	debugf1("  DramHoleValid: %s\n",
 		(pvt->dhar & DHAR_VALID) ? "yes" : "no");
 
+	amd64_debug_display_dimm_sizes(0, pvt);
+
 	/* everything below this point is Fam10h and above */
-	if (boot_cpu_data.x86 == 0xf) {
-		amd64_debug_display_dimm_sizes(0, pvt);
+	if (boot_cpu_data.x86 == 0xf)
 		return;
-	}
+
+	amd64_debug_display_dimm_sizes(1, pvt);
 
 	amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
 
 	/* Only if NOT ganged does dclr1 have valid info */
 	if (!dct_ganging_enabled(pvt))
 		amd64_dump_dramcfg_low(pvt->dclr1, 1);
-
-	/*
-	 * Determine if ganged and then dump memory sizes for first controller,
-	 * and if NOT ganged dump info for 2nd controller.
-	 */
-	ganged = dct_ganging_enabled(pvt);
-
-	amd64_debug_display_dimm_sizes(0, pvt);
-
-	if (!ganged)
-		amd64_debug_display_dimm_sizes(1, pvt);
 }
 
 /* Read in both of DBAM registers */
@@ -1644,11 +1633,10 @@
 		       WARN_ON(ctrl != 0);
 	}
 
-	debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
-		ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
+	dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
+	dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0;
 
-	dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
-	dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
+	debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
 
 	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
 
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 68f942c..0c56989 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -49,15 +49,13 @@
 	  configuration section.
 
 config FIREWIRE_NET
-	tristate "IP networking over 1394 (EXPERIMENTAL)"
-	depends on FIREWIRE && INET && EXPERIMENTAL
+	tristate "IP networking over 1394"
+	depends on FIREWIRE && INET
 	help
 	  This enables IPv4 over IEEE 1394, providing IP connectivity with
 	  other implementations of RFC 2734 as found on several operating
 	  systems.  Multicast support is currently limited.
 
-	  NOTE, this driver is not stable yet!
-
 	  To compile this driver as a module, say M here:  The module will be
 	  called firewire-net.
 
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index be04923..24ff355 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -75,6 +75,8 @@
 #define BIB_IRMC		((1) << 31)
 #define NODE_CAPABILITIES	0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
 
+#define CANON_OUI		0x000085
+
 static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
 {
 	struct fw_descriptor *desc;
@@ -284,6 +286,7 @@
 	bool root_device_is_running;
 	bool root_device_is_cmc;
 	bool irm_is_1394_1995_only;
+	bool keep_this_irm;
 
 	spin_lock_irq(&card->lock);
 
@@ -305,6 +308,10 @@
 	irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
 			(irm_device->config_rom[2] & 0x000000f0) == 0;
 
+	/* Canon MV5i works unreliably if it is not root node. */
+	keep_this_irm = irm_device && irm_device->config_rom &&
+			irm_device->config_rom[3] >> 8 == CANON_OUI;
+
 	root_id  = root_node->node_id;
 	irm_id   = card->irm_node->node_id;
 	local_id = card->local_node->node_id;
@@ -333,7 +340,7 @@
 			goto pick_me;
 		}
 
-		if (irm_is_1394_1995_only) {
+		if (irm_is_1394_1995_only && !keep_this_irm) {
 			new_root_id = local_id;
 			fw_notify("%s, making local node (%02x) root.\n",
 				  "IRM is not 1394a compliant", new_root_id);
@@ -382,7 +389,7 @@
 
 		spin_lock_irq(&card->lock);
 
-		if (rcode != RCODE_COMPLETE) {
+		if (rcode != RCODE_COMPLETE && !keep_this_irm) {
 			/*
 			 * The lock request failed, maybe the IRM
 			 * isn't really IRM capable after all. Let's
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index c2e194c..7ed08fd 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -191,6 +191,7 @@
 	struct fwnet_device *dev;
 	u64 guid;
 	u64 fifo;
+	__be32 ip;
 
 	/* guarded by dev->lock */
 	struct list_head pd_list; /* received partial datagrams */
@@ -570,6 +571,8 @@
 				peer->speed = sspd;
 			if (peer->max_payload > max_payload)
 				peer->max_payload = max_payload;
+
+			peer->ip = arp1394->sip;
 		}
 		spin_unlock_irqrestore(&dev->lock, flags);
 
@@ -1470,6 +1473,7 @@
 	peer->dev = dev;
 	peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
 	peer->fifo = FWNET_NO_FIFO_ADDR;
+	peer->ip = 0;
 	INIT_LIST_HEAD(&peer->pd_list);
 	peer->pdg_size = 0;
 	peer->datagram_label = 0;
@@ -1589,10 +1593,13 @@
 
 	mutex_lock(&fwnet_device_mutex);
 
+	net = dev->netdev;
+	if (net && peer->ip)
+		arp_invalidate(net, peer->ip);
+
 	fwnet_remove_peer(peer, dev);
 
 	if (list_empty(&dev->peer_list)) {
-		net = dev->netdev;
 		unregister_netdev(net);
 
 		if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index e8b6a13..e710424 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -27,7 +27,7 @@
 	  using the kernel parameter 'edd={on|skipmbr|off}'.
 
 config FIRMWARE_MEMMAP
-    bool "Add firmware-provided memory map to sysfs" if EMBEDDED
+    bool "Add firmware-provided memory map to sysfs" if EXPERT
     default X86
     help
       Add the firmware-provided (unmodified) memory map to /sys/firmware/memmap.
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index e28e41668..bcb1126 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -378,10 +378,17 @@
 
 static void __init dmi_dump_ids(void)
 {
+	const char *board;	/* Board Name is optional */
+
 	printk(KERN_DEBUG "DMI: ");
-	print_filtered(dmi_get_system_info(DMI_BOARD_NAME));
-	printk(KERN_CONT "/");
+	print_filtered(dmi_get_system_info(DMI_SYS_VENDOR));
+	printk(KERN_CONT " ");
 	print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME));
+	board = dmi_get_system_info(DMI_BOARD_NAME);
+	if (board) {
+		printk(KERN_CONT "/");
+		print_filtered(board);
+	}
 	printk(KERN_CONT ", BIOS ");
 	print_filtered(dmi_get_system_info(DMI_BIOS_VERSION));
 	printk(KERN_CONT " ");
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index d81cc74..54d70a4 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -187,7 +187,7 @@
 
 static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
 {
-	struct lnw_gpio *lnw = (struct lnw_gpio *)get_irq_data(irq);
+	struct lnw_gpio *lnw = get_irq_data(irq);
 	u32 base, gpio;
 	void __iomem *gedr;
 	u32 gedr_v;
@@ -206,7 +206,12 @@
 		/* clear the edge detect status bit */
 		writel(gedr_v, gedr);
 	}
-	desc->chip->eoi(irq);
+
+	if (desc->chip->irq_eoi)
+		desc->chip->irq_eoi(irq_get_irq_data(irq));
+	else
+		dev_warn(lnw->chip.dev, "missing EOI handler for irq %d\n", irq);
+
 }
 
 static int __devinit lnw_gpio_probe(struct pci_dev *pdev,
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index a261972..b473429e 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -60,6 +60,7 @@
 	unsigned gpio_start;
 	uint16_t reg_output;
 	uint16_t reg_direction;
+	struct mutex i2c_lock;
 
 #ifdef CONFIG_GPIO_PCA953X_IRQ
 	struct mutex irq_lock;
@@ -119,13 +120,17 @@
 
 	chip = container_of(gc, struct pca953x_chip, gpio_chip);
 
+	mutex_lock(&chip->i2c_lock);
 	reg_val = chip->reg_direction | (1u << off);
 	ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
 	if (ret)
-		return ret;
+		goto exit;
 
 	chip->reg_direction = reg_val;
-	return 0;
+	ret = 0;
+exit:
+	mutex_unlock(&chip->i2c_lock);
+	return ret;
 }
 
 static int pca953x_gpio_direction_output(struct gpio_chip *gc,
@@ -137,6 +142,7 @@
 
 	chip = container_of(gc, struct pca953x_chip, gpio_chip);
 
+	mutex_lock(&chip->i2c_lock);
 	/* set output level */
 	if (val)
 		reg_val = chip->reg_output | (1u << off);
@@ -145,7 +151,7 @@
 
 	ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
 	if (ret)
-		return ret;
+		goto exit;
 
 	chip->reg_output = reg_val;
 
@@ -153,10 +159,13 @@
 	reg_val = chip->reg_direction & ~(1u << off);
 	ret = pca953x_write_reg(chip, PCA953X_DIRECTION, reg_val);
 	if (ret)
-		return ret;
+		goto exit;
 
 	chip->reg_direction = reg_val;
-	return 0;
+	ret = 0;
+exit:
+	mutex_unlock(&chip->i2c_lock);
+	return ret;
 }
 
 static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
@@ -167,7 +176,9 @@
 
 	chip = container_of(gc, struct pca953x_chip, gpio_chip);
 
+	mutex_lock(&chip->i2c_lock);
 	ret = pca953x_read_reg(chip, PCA953X_INPUT, &reg_val);
+	mutex_unlock(&chip->i2c_lock);
 	if (ret < 0) {
 		/* NOTE:  diagnostic already emitted; that's all we should
 		 * do unless gpio_*_value_cansleep() calls become different
@@ -187,6 +198,7 @@
 
 	chip = container_of(gc, struct pca953x_chip, gpio_chip);
 
+	mutex_lock(&chip->i2c_lock);
 	if (val)
 		reg_val = chip->reg_output | (1u << off);
 	else
@@ -194,9 +206,11 @@
 
 	ret = pca953x_write_reg(chip, PCA953X_OUTPUT, reg_val);
 	if (ret)
-		return;
+		goto exit;
 
 	chip->reg_output = reg_val;
+exit:
+	mutex_unlock(&chip->i2c_lock);
 }
 
 static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
@@ -517,6 +531,8 @@
 
 	chip->names = pdata->names;
 
+	mutex_init(&chip->i2c_lock);
+
 	/* initialize cached registers from their original values.
 	 * we can't share this chip with another i2c master.
 	 */
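
The new i2c_lock serializes the read-modify-write of those cached reg_output/reg_direction values against the matching I2C writes, so two concurrent GPIO calls cannot lose each other's bit updates. A minimal sketch of the same shadow-register pattern, using a pthread mutex in place of the kernel mutex and a stubbed bus write (both purely illustrative):

#include <pthread.h>
#include <stdint.h>

struct cached_reg {
	pthread_mutex_t lock;
	uint16_t shadow;                  /* last value successfully written to the device */
};

/* Stub for the real bus transfer (the driver goes through the I2C/SMBus helpers). */
static int bus_write(uint16_t val) { (void)val; return 0; }

/* Set or clear one bit, keeping the shadow copy and the device in step. */
static int cached_reg_update_bit(struct cached_reg *r, unsigned int bit, int set)
{
	uint16_t val;
	int ret;

	pthread_mutex_lock(&r->lock);
	val = set ? (r->shadow | (1u << bit)) : (r->shadow & ~(1u << bit));
	ret = bus_write(val);
	if (ret == 0)
		r->shadow = val;          /* update the cache only if the write succeeded */
	pthread_mutex_unlock(&r->lock);
	return ret;
}

Updating the shadow only after a successful write mirrors the error paths in the hunk above, which jump to the unlock label without touching chip->reg_output or chip->reg_direction when the I2C write fails.
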
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 64828a7..0902d44 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -23,7 +23,7 @@
 	tristate
 	depends on DRM
 	select FB
-	select FRAMEBUFFER_CONSOLE if !EMBEDDED
+	select FRAMEBUFFER_CONSOLE if !EXPERT
 	help
 	  FB and CRTC helpers for KMS drivers.
 
@@ -100,7 +100,10 @@
 config DRM_I915
 	tristate "i915 driver"
 	depends on AGP_INTEL
+	# we need shmfs for the swappable backing store, and in particular
+	# the shmem_readpage() which depends upon tmpfs
 	select SHMEM
+	select TMPFS
 	select DRM_KMS_HELPER
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2baa670..654faa8 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2674,3 +2674,23 @@
 	mutex_unlock(&dev->mode_config.mutex);
 	return ret;
 }
+
+void drm_mode_config_reset(struct drm_device *dev)
+{
+	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		if (crtc->funcs->reset)
+			crtc->funcs->reset(crtc);
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+		if (encoder->funcs->reset)
+			encoder->funcs->reset(encoder);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->funcs->reset)
+			connector->funcs->reset(connector);
+}
+EXPORT_SYMBOL(drm_mode_config_reset);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 952b3d4..9236965 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -343,13 +343,12 @@
 	struct drm_encoder *encoder;
 	bool ret = true;
 
-	adjusted_mode = drm_mode_duplicate(dev, mode);
-
 	crtc->enabled = drm_helper_crtc_in_use(crtc);
-
 	if (!crtc->enabled)
 		return true;
 
+	adjusted_mode = drm_mode_duplicate(dev, mode);
+
 	saved_hwmode = crtc->hwmode;
 	saved_mode = crtc->mode;
 	saved_x = crtc->x;
@@ -437,10 +436,9 @@
 	 */
 	drm_calc_timestamping_constants(crtc);
 
-	/* XXX free adjustedmode */
-	drm_mode_destroy(dev, adjusted_mode);
 	/* FIXME: add subpixel order */
 done:
+	drm_mode_destroy(dev, adjusted_mode);
 	if (!ret) {
 		crtc->hwmode = saved_hwmode;
 		crtc->mode = saved_mode;
@@ -497,14 +495,17 @@
 
 	crtc_funcs = set->crtc->helper_private;
 
+	if (!set->mode)
+		set->fb = NULL;
+
 	if (set->fb) {
 		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
 				set->crtc->base.id, set->fb->base.id,
 				(int)set->num_connectors, set->x, set->y);
 	} else {
-		DRM_DEBUG_KMS("[CRTC:%d] [NOFB] #connectors=%d (x y) (%i %i)\n",
-				set->crtc->base.id, (int)set->num_connectors,
-				set->x, set->y);
+		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
+		set->mode = NULL;
+		set->num_connectors = 0;
 	}
 
 	dev = set->crtc->dev;
@@ -649,8 +650,8 @@
 		mode_changed = true;
 
 	if (mode_changed) {
-		set->crtc->enabled = (set->mode != NULL);
-		if (set->mode != NULL) {
+		set->crtc->enabled = drm_helper_crtc_in_use(set->crtc);
+		if (set->crtc->enabled) {
 			DRM_DEBUG_KMS("attempting to set mode from"
 					" userspace\n");
 			drm_mode_debug_printmodeline(set->mode);
@@ -665,6 +666,12 @@
 				ret = -EINVAL;
 				goto fail;
 			}
+			DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+			for (i = 0; i < set->num_connectors; i++) {
+				DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+					      drm_get_connector_name(set->connectors[i]));
+				set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+			}
 		}
 		drm_helper_disable_unused_functions(dev);
 	} else if (fb_changed) {
@@ -681,12 +688,6 @@
 			goto fail;
 		}
 	}
-	DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
-	for (i = 0; i < set->num_connectors; i++) {
-		DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
-			      drm_get_connector_name(set->connectors[i]));
-		set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
-	}
 
 	kfree(save_connectors);
 	kfree(save_encoders);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 5c4f9b9..6977a1c 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1533,11 +1533,11 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
 
-/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EMBEDDED)
+/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
  * but the module doesn't depend on any fb console symbols.  At least
  * attempt to load fbcon to avoid leaving the system without a usable console.
  */
-#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EMBEDDED)
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
 static int __init drm_fb_helper_modinit(void)
 {
 	const char *name = "fbcon";
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 3cdbaf3..be9a9c0 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -283,17 +283,18 @@
 #endif
 
 	mutex_lock(&dev->struct_mutex);
-	seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
+	seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
 		   atomic_read(&dev->vma_count),
-		   high_memory, (u64)virt_to_phys(high_memory));
+		   high_memory, (void *)virt_to_phys(high_memory));
 
 	list_for_each_entry(pt, &dev->vmalist, head) {
 		vma = pt->vma;
 		if (!vma)
 			continue;
 		seq_printf(m,
-			   "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
-			   pt->pid, vma->vm_start, vma->vm_end,
+			   "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
+			   pt->pid,
+			   (void *)vma->vm_start, (void *)vma->vm_end,
 			   vma->vm_flags & VM_READ ? 'r' : '-',
 			   vma->vm_flags & VM_WRITE ? 'w' : '-',
 			   vma->vm_flags & VM_EXEC ? 'x' : '-',
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0054e95..3dadfa2 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1250,7 +1250,7 @@
  * Drivers should call this routine in their vblank interrupt handlers to
  * update the vblank counter and send any signals that may be pending.
  */
-void drm_handle_vblank(struct drm_device *dev, int crtc)
+bool drm_handle_vblank(struct drm_device *dev, int crtc)
 {
 	u32 vblcount;
 	s64 diff_ns;
@@ -1258,7 +1258,7 @@
 	unsigned long irqflags;
 
 	if (!dev->num_crtcs)
-		return;
+		return false;
 
 	/* Need timestamp lock to prevent concurrent execution with
 	 * vblank enable/disable, as this would cause inconsistent
@@ -1269,7 +1269,7 @@
 	/* Vblank irq handling disabled. Nothing to do. */
 	if (!dev->vblank_enabled[crtc]) {
 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
-		return;
+		return false;
 	}
 
 	/* Fetch corresponding timestamp for this vblank interval from
@@ -1311,5 +1311,6 @@
 	drm_handle_vblank_events(dev, crtc);
 
 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+	return true;
 }
 EXPORT_SYMBOL(drm_handle_vblank);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 844f3c9..17bd766 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -152,7 +152,7 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+	int ret;
 
 	master_priv->sarea = drm_getsarea(dev);
 	if (master_priv->sarea) {
@@ -163,33 +163,22 @@
 	}
 
 	if (init->ring_size != 0) {
-		if (ring->obj != NULL) {
+		if (LP_RING(dev_priv)->obj != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
 			return -EINVAL;
 		}
 
-		ring->size = init->ring_size;
-
-		ring->map.offset = init->ring_start;
-		ring->map.size = init->ring_size;
-		ring->map.type = 0;
-		ring->map.flags = 0;
-		ring->map.mtrr = 0;
-
-		drm_core_ioremap_wc(&ring->map, dev);
-
-		if (ring->map.handle == NULL) {
+		ret = intel_render_ring_init_dri(dev,
+						 init->ring_start,
+						 init->ring_size);
+		if (ret) {
 			i915_dma_cleanup(dev);
-			DRM_ERROR("can not ioremap virtual address for"
-				  " ring buffer\n");
-			return -ENOMEM;
+			return ret;
 		}
 	}
 
-	ring->virtual_start = ring->map.handle;
-
 	dev_priv->cpp = init->cpp;
 	dev_priv->back_offset = init->back_offset;
 	dev_priv->front_offset = init->front_offset;
@@ -1226,9 +1215,15 @@
 	if (ret)
 		DRM_INFO("failed to find VBIOS tables\n");
 
-	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
+	/* If we have more than one VGA card, we need to arbitrate access
+	 * to the common VGA resources.
+	 *
+	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
+	 * then we do not take part in VGA arbitration and the
+	 * vga_client_register() fails with -ENODEV.
+	 */
 	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
-	if (ret)
+	if (ret && ret != -ENODEV)
 		goto cleanup_ringbuffer;
 
 	intel_register_dsm_handler();
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 72fea2b..0ad533f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -46,6 +46,9 @@
 unsigned int i915_powersave = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
 
+unsigned int i915_enable_rc6 = 0;
+module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
+
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
 
@@ -60,7 +63,7 @@
 
 #define INTEL_VGA_DEVICE(id, info) {		\
 	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
-	.class_mask = 0xffff00,			\
+	.class_mask = 0xff0000,			\
 	.vendor = 0x8086,			\
 	.device = id,				\
 	.subvendor = PCI_ANY_ID,		\
@@ -354,12 +357,13 @@
 		error = i915_gem_init_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
 
+		drm_mode_config_reset(dev);
 		drm_irq_install(dev);
 
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
 
-		if (dev_priv->renderctx && dev_priv->pwrctx)
+		if (IS_IRONLAKE_M(dev))
 			ironlake_enable_rc6(dev);
 	}
 
@@ -542,6 +546,7 @@
 
 		mutex_unlock(&dev->struct_mutex);
 		drm_irq_uninstall(dev);
+		drm_mode_config_reset(dev);
 		drm_irq_install(dev);
 		mutex_lock(&dev->struct_mutex);
 	}
@@ -566,6 +571,14 @@
 static int __devinit
 i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	/* Only bind to function 0 of the device. Early generations
+	 * used function 1 as a placeholder for multi-head. Instead it
+	 * only causes us confusion, especially on systems where both
+	 * functions have the same PCI-ID!
+	 */
+	if (PCI_FUNC(pdev->devfn))
+		return -ENODEV;
+
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
@@ -752,6 +765,9 @@
 		driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
+	if (!(driver.driver_features & DRIVER_MODESET))
+		driver.get_vblank_timestamp = NULL;
+
 	return drm_init(&driver);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5969f46..65dfe81 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -543,8 +543,11 @@
 		/** List of all objects in gtt_space. Used to restore gtt
 		 * mappings on resume */
 		struct list_head gtt_list;
-		/** End of mappable part of GTT */
+
+		/** Usable portion of the GTT for GEM */
+		unsigned long gtt_start;
 		unsigned long gtt_mappable_end;
+		unsigned long gtt_end;
 
 		struct io_mapping *gtt_mapping;
 		int gtt_mtrr;
@@ -955,6 +958,7 @@
 extern unsigned int i915_powersave;
 extern unsigned int i915_lvds_downclock;
 extern unsigned int i915_panel_use_ssc;
+extern unsigned int i915_enable_rc6;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
 extern int i915_resume(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3dfc848..cf4f74c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -140,12 +140,16 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	drm_mm_init(&dev_priv->mm.gtt_space, start,
-		    end - start);
+	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
 
+	dev_priv->mm.gtt_start = start;
+	dev_priv->mm.gtt_mappable_end = mappable_end;
+	dev_priv->mm.gtt_end = end;
 	dev_priv->mm.gtt_total = end - start;
 	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
-	dev_priv->mm.gtt_mappable_end = mappable_end;
+
+	/* Take over this portion of the GTT */
+	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
 }
 
 int
@@ -1857,7 +1861,7 @@
 
 	seqno = ring->get_seqno(ring);
 
-	for (i = 0; i < I915_NUM_RINGS; i++)
+	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
 		if (seqno >= ring->sync_seqno[i])
 			ring->sync_seqno[i] = 0;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dcfdf41..d2f445e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1175,7 +1175,7 @@
 		goto err;
 
 	seqno = i915_gem_next_request_seqno(dev, ring);
-	for (i = 0; i < I915_NUM_RINGS-1; i++) {
+	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
 		if (seqno < ring->sync_seqno[i]) {
 			/* The GPU can not handle its semaphore value wrapping,
 			 * so every billion or so execbuffers, we need to stall
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 70433ae..b0abdc6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -34,6 +34,10 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 
+	/* First fill our portion of the GTT with scratch pages */
+	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
+			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
+
 	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
 		i915_gem_clflush_object(obj);
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b8e509a..97f946dc 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -274,24 +274,35 @@
 	return ret;
 }
 
-int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
 			      int *max_error,
 			      struct timeval *vblank_time,
 			      unsigned flags)
 {
-	struct drm_crtc *drmcrtc;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
 
-	if (crtc < 0 || crtc >= dev->num_crtcs) {
-		DRM_ERROR("Invalid crtc %d\n", crtc);
+	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
 		return -EINVAL;
 	}
 
 	/* Get drm_crtc to timestamp: */
-	drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
+	crtc = intel_get_crtc_for_pipe(dev, pipe);
+	if (crtc == NULL) {
+		DRM_ERROR("Invalid crtc %d\n", pipe);
+		return -EINVAL;
+	}
+
+	if (!crtc->enabled) {
+		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
+		return -EBUSY;
+	}
 
 	/* Helper routine in DRM core does all the work: */
-	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
-						     vblank_time, flags, drmcrtc);
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
+						     vblank_time, flags,
+						     crtc);
 }
 
 /*
@@ -348,8 +359,12 @@
 			struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 seqno = ring->get_seqno(ring);
+	u32 seqno;
 
+	if (ring->obj == NULL)
+		return;
+
+	seqno = ring->get_seqno(ring);
 	trace_i915_gem_request_complete(dev, seqno);
 
 	ring->irq_seqno = seqno;
@@ -831,6 +846,8 @@
 		i++;
 	error->pinned_bo_count = i - error->active_bo_count;
 
+	error->active_bo = NULL;
+	error->pinned_bo = NULL;
 	if (i) {
 		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
 					   GFP_ATOMIC);
@@ -1179,18 +1196,18 @@
 				intel_finish_page_flip_plane(dev, 1);
 		}
 
-		if (pipea_stats & vblank_status) {
+		if (pipea_stats & vblank_status &&
+		    drm_handle_vblank(dev, 0)) {
 			vblank++;
-			drm_handle_vblank(dev, 0);
 			if (!dev_priv->flip_pending_is_done) {
 				i915_pageflip_stall_check(dev, 0);
 				intel_finish_page_flip(dev, 0);
 			}
 		}
 
-		if (pipeb_stats & vblank_status) {
+		if (pipeb_stats & vblank_status &&
+		    drm_handle_vblank(dev, 1)) {
 			vblank++;
-			drm_handle_vblank(dev, 1);
 			if (!dev_priv->flip_pending_is_done) {
 				i915_pageflip_stall_check(dev, 1);
 				intel_finish_page_flip(dev, 1);
@@ -1278,12 +1295,12 @@
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-	ret = -ENODEV;
 	if (ring->irq_get(ring)) {
 		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
 			    READ_BREADCRUMB(dev_priv) >= irq_nr);
 		ring->irq_put(ring);
-	}
+	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
+		ret = -EBUSY;
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 40a407f..15d94c6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -174,7 +174,9 @@
 *   address/value pairs. Don't overdo it, though, x <= 2^4 must hold!
  */
 #define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*x-1)
-#define MI_FLUSH_DW		MI_INSTR(0x26, 2) /* for GEN6 */
+#define MI_FLUSH_DW		MI_INSTR(0x26, 1) /* for GEN6 */
+#define   MI_INVALIDATE_TLB	(1<<18)
+#define   MI_INVALIDATE_BSD	(1<<7)
 #define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE	(1)
 #define   MI_BATCH_NON_SECURE_I965 (1<<8)
@@ -513,6 +515,10 @@
 #define   GEN6_BLITTER_SYNC_STATUS			(1 << 24)
 #define   GEN6_BLITTER_USER_INTERRUPT			(1 << 22)
 
+#define GEN6_BLITTER_ECOSKPD	0x221d0
+#define   GEN6_BLITTER_LOCK_SHIFT			16
+#define   GEN6_BLITTER_FBC_NOTIFY			(1<<3)
+
 #define GEN6_BSD_SLEEP_PSMI_CONTROL	0x12050
 #define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK	(1 << 16)
 #define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE		(1 << 0)
@@ -2626,6 +2632,8 @@
 #define DISPLAY_PORT_PLL_BIOS_2         0x46014
 
 #define PCH_DSPCLK_GATE_D	0x42020
+# define DPFCUNIT_CLOCK_GATE_DISABLE		(1 << 9)
+# define DPFCRUNIT_CLOCK_GATE_DISABLE		(1 << 8)
 # define DPFDUNIT_CLOCK_GATE_DISABLE		(1 << 7)
 # define DPARBUNIT_CLOCK_GATE_DISABLE		(1 << 5)
 
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 17035b8..8a77ff4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -535,6 +535,15 @@
 	return 0;
 }
 
+static void intel_crt_reset(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct intel_crt *crt = intel_attached_crt(connector);
+
+	if (HAS_PCH_SPLIT(dev))
+		crt->force_hotplug_required = 1;
+}
+
 /*
  * Routines for controlling stuff on the analog port
  */
@@ -548,6 +557,7 @@
 };
 
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
+	.reset = intel_crt_reset,
 	.dpms = drm_helper_connector_dpms,
 	.detect = intel_crt_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 98967f3..3b00653 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1213,6 +1213,26 @@
 	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
+static void sandybridge_blit_fbc_update(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 blt_ecoskpd;
+
+	/* Make sure blitter notifies FBC of writes */
+	__gen6_force_wake_get(dev_priv);
+	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
+	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
+		GEN6_BLITTER_LOCK_SHIFT;
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
+			 GEN6_BLITTER_LOCK_SHIFT);
+	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
+	POSTING_READ(GEN6_BLITTER_ECOSKPD);
+	__gen6_force_wake_put(dev_priv);
+}
+
 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 {
 	struct drm_device *dev = crtc->dev;
@@ -1266,6 +1286,7 @@
 		I915_WRITE(SNB_DPFC_CTL_SA,
 			   SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
 		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+		sandybridge_blit_fbc_update(dev);
 	}
 
 	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
@@ -5530,6 +5551,16 @@
 	return ret;
 }
 
+static void intel_crtc_reset(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	/* Reset flags back to the 'unknown' status so that they
+	 * will be correctly set on the initial modeset.
+	 */
+	intel_crtc->dpms_mode = -1;
+}
+
 static struct drm_crtc_helper_funcs intel_helper_funcs = {
 	.dpms = intel_crtc_dpms,
 	.mode_fixup = intel_crtc_mode_fixup,
@@ -5541,6 +5572,7 @@
 };
 
 static const struct drm_crtc_funcs intel_crtc_funcs = {
+	.reset = intel_crtc_reset,
 	.cursor_set = intel_crtc_cursor_set,
 	.cursor_move = intel_crtc_cursor_move,
 	.gamma_set = intel_crtc_gamma_set,
@@ -5631,8 +5663,7 @@
 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
-	intel_crtc->cursor_addr = 0;
-	intel_crtc->dpms_mode = -1;
+	intel_crtc_reset(&intel_crtc->base);
 	intel_crtc->active = true; /* force the pipe off on setup_init_config */
 
 	if (HAS_PCH_SPLIT(dev)) {
@@ -6286,7 +6317,9 @@
 
 		if (IS_GEN5(dev)) {
 			/* Required for FBC */
-			dspclk_gate |= DPFDUNIT_CLOCK_GATE_DISABLE;
+			dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
+				DPFCRUNIT_CLOCK_GATE_DISABLE |
+				DPFDUNIT_CLOCK_GATE_DISABLE;
 			/* Required for CxSR */
 			dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
 
@@ -6429,29 +6462,19 @@
 	}
 }
 
-void intel_disable_clock_gating(struct drm_device *dev)
+static void ironlake_teardown_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (dev_priv->renderctx) {
-		struct drm_i915_gem_object *obj = dev_priv->renderctx;
-
-		I915_WRITE(CCID, 0);
-		POSTING_READ(CCID);
-
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(&obj->base);
+		i915_gem_object_unpin(dev_priv->renderctx);
+		drm_gem_object_unreference(&dev_priv->renderctx->base);
 		dev_priv->renderctx = NULL;
 	}
 
 	if (dev_priv->pwrctx) {
-		struct drm_i915_gem_object *obj = dev_priv->pwrctx;
-
-		I915_WRITE(PWRCTXA, 0);
-		POSTING_READ(PWRCTXA);
-
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(&obj->base);
+		i915_gem_object_unpin(dev_priv->pwrctx);
+		drm_gem_object_unreference(&dev_priv->pwrctx->base);
 		dev_priv->pwrctx = NULL;
 	}
 }
@@ -6460,21 +6483,39 @@
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
-	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
-	wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
-		 10);
-	POSTING_READ(CCID);
-	I915_WRITE(PWRCTXA, 0);
-	POSTING_READ(PWRCTXA);
-	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
-	POSTING_READ(RSTDBYCTL);
-	i915_gem_object_unpin(dev_priv->renderctx);
-	drm_gem_object_unreference(&dev_priv->renderctx->base);
-	dev_priv->renderctx = NULL;
-	i915_gem_object_unpin(dev_priv->pwrctx);
-	drm_gem_object_unreference(&dev_priv->pwrctx->base);
-	dev_priv->pwrctx = NULL;
+	if (I915_READ(PWRCTXA)) {
+		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+			 50);
+
+		I915_WRITE(PWRCTXA, 0);
+		POSTING_READ(PWRCTXA);
+
+		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+		POSTING_READ(RSTDBYCTL);
+	}
+
+	ironlake_disable_rc6(dev);
+}
+
+static int ironlake_setup_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->renderctx == NULL)
+		dev_priv->renderctx = intel_alloc_context_page(dev);
+	if (!dev_priv->renderctx)
+		return -ENOMEM;
+
+	if (dev_priv->pwrctx == NULL)
+		dev_priv->pwrctx = intel_alloc_context_page(dev);
+	if (!dev_priv->pwrctx) {
+		ironlake_teardown_rc6(dev);
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
 void ironlake_enable_rc6(struct drm_device *dev)
@@ -6482,15 +6523,26 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
+	/* rc6 disabled by default due to repeated reports of hanging during
+	 * boot and resume.
+	 */
+	if (!i915_enable_rc6)
+		return;
+
+	ret = ironlake_setup_rc6(dev);
+	if (ret)
+		return;
+
 	/*
 	 * GPU can automatically power down the render unit if given a page
 	 * to save state.
 	 */
 	ret = BEGIN_LP_RING(6);
 	if (ret) {
-		ironlake_disable_rc6(dev);
+		ironlake_teardown_rc6(dev);
 		return;
 	}
+
 	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
 	OUT_RING(MI_SET_CONTEXT);
 	OUT_RING(dev_priv->renderctx->gtt_offset |
@@ -6507,6 +6559,7 @@
 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 }
 
+
 /* Set up chip specific display functions */
 static void intel_init_display(struct drm_device *dev)
 {
@@ -6749,21 +6802,9 @@
 	if (IS_GEN6(dev))
 		gen6_enable_rps(dev_priv);
 
-	if (IS_IRONLAKE_M(dev)) {
-		dev_priv->renderctx = intel_alloc_context_page(dev);
-		if (!dev_priv->renderctx)
-			goto skip_rc6;
-		dev_priv->pwrctx = intel_alloc_context_page(dev);
-		if (!dev_priv->pwrctx) {
-			i915_gem_object_unpin(dev_priv->renderctx);
-			drm_gem_object_unreference(&dev_priv->renderctx->base);
-			dev_priv->renderctx = NULL;
-			goto skip_rc6;
-		}
+	if (IS_IRONLAKE_M(dev))
 		ironlake_enable_rc6(dev);
-	}
 
-skip_rc6:
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
 		    (unsigned long)dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1f4242b..51cb4e3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1639,6 +1639,24 @@
 	return 0;
 }
 
+static bool
+intel_dp_detect_audio(struct drm_connector *connector)
+{
+	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct edid *edid;
+	bool has_audio = false;
+
+	edid = drm_get_edid(connector, &intel_dp->adapter);
+	if (edid) {
+		has_audio = drm_detect_monitor_audio(edid);
+
+		connector->display_info.raw_edid = NULL;
+		kfree(edid);
+	}
+
+	return has_audio;
+}
+
 static int
 intel_dp_set_property(struct drm_connector *connector,
 		      struct drm_property *property,
@@ -1652,17 +1670,23 @@
 		return ret;
 
 	if (property == intel_dp->force_audio_property) {
-		if (val == intel_dp->force_audio)
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_dp->force_audio)
 			return 0;
 
-		intel_dp->force_audio = val;
+		intel_dp->force_audio = i;
 
-		if (val > 0 && intel_dp->has_audio)
-			return 0;
-		if (val < 0 && !intel_dp->has_audio)
+		if (i == 0)
+			has_audio = intel_dp_detect_audio(connector);
+		else
+			has_audio = i > 0;
+
+		if (has_audio == intel_dp->has_audio)
 			return 0;
 
-		intel_dp->has_audio = val > 0;
+		intel_dp->has_audio = has_audio;
 		goto done;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 74db255..2c43104 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -298,7 +298,6 @@
 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 				    u16 *blue, int regno);
 extern void intel_enable_clock_gating(struct drm_device *dev);
-extern void intel_disable_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0d0273e..c635c9e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -251,6 +251,27 @@
 				   &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
 }
 
+static bool
+intel_hdmi_detect_audio(struct drm_connector *connector)
+{
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct edid *edid;
+	bool has_audio = false;
+
+	edid = drm_get_edid(connector,
+			    &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+	if (edid) {
+		if (edid->input & DRM_EDID_INPUT_DIGITAL)
+			has_audio = drm_detect_monitor_audio(edid);
+
+		connector->display_info.raw_edid = NULL;
+		kfree(edid);
+	}
+
+	return has_audio;
+}
+
 static int
 intel_hdmi_set_property(struct drm_connector *connector,
 		      struct drm_property *property,
@@ -264,17 +285,23 @@
 		return ret;
 
 	if (property == intel_hdmi->force_audio_property) {
-		if (val == intel_hdmi->force_audio)
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_hdmi->force_audio)
 			return 0;
 
-		intel_hdmi->force_audio = val;
+		intel_hdmi->force_audio = i;
 
-		if (val > 0 && intel_hdmi->has_audio)
-			return 0;
-		if (val < 0 && !intel_hdmi->has_audio)
+		if (i == 0)
+			has_audio = intel_hdmi_detect_audio(connector);
+		else
+			has_audio = i > 0;
+
+		if (has_audio == intel_hdmi->has_audio)
 			return 0;
 
-		intel_hdmi->has_audio = val > 0;
+		intel_hdmi->has_audio = has_audio;
 		goto done;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index ace8d5d..bcdba7b 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -261,12 +261,6 @@
 		return true;
 	}
 
-	/* Make sure pre-965s set dither correctly */
-	if (INTEL_INFO(dev)->gen < 4) {
-		if (dev_priv->lvds_dither)
-			pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-	}
-
 	/* Native modes don't need fitting */
 	if (adjusted_mode->hdisplay == mode->hdisplay &&
 	    adjusted_mode->vdisplay == mode->vdisplay)
@@ -374,10 +368,16 @@
 	}
 
 out:
+	/* If not enabling scaling, be consistent and always use 0. */
 	if ((pfit_control & PFIT_ENABLE) == 0) {
 		pfit_control = 0;
 		pfit_pgm_ratios = 0;
 	}
+
+	/* Make sure pre-965 set dither correctly */
+	if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
+		pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
 	if (pfit_control != intel_lvds->pfit_control ||
 	    pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
 		intel_lvds->pfit_control = pfit_control;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index f295a7a..64fd644 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -26,6 +26,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/acpi_io.h>
 #include <acpi/video.h>
 
 #include "drmP.h"
@@ -476,7 +477,7 @@
 		return -ENOTSUPP;
 	}
 
-	base = ioremap(asls, OPREGION_SIZE);
+	base = acpi_os_ioremap(asls, OPREGION_SIZE);
 	if (!base)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 03e3370..445f27e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,14 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+static inline int ring_space(struct intel_ring_buffer *ring)
+{
+	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+	if (space < 0)
+		space += ring->size;
+	return space;
+}
+
 static u32 i915_gem_get_seqno(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -204,11 +212,9 @@
 	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
 		i915_kernel_lost_context(ring->dev);
 	else {
-		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+		ring->head = I915_READ_HEAD(ring);
 		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->size;
+		ring->space = ring_space(ring);
 	}
 
 	return 0;
@@ -921,7 +927,7 @@
 	}
 
 	ring->tail = 0;
-	ring->space = ring->head - 8;
+	ring->space = ring_space(ring);
 
 	return 0;
 }
@@ -933,20 +939,22 @@
 	unsigned long end;
 	u32 head;
 
+	/* If the reported head position has wrapped or hasn't advanced,
+	 * fall back to the slow and accurate path.
+	 */
+	head = intel_read_status_page(ring, 4);
+	if (head > ring->head) {
+		ring->head = head;
+		ring->space = ring_space(ring);
+		if (ring->space >= n)
+			return 0;
+	}
+
 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;
 	do {
-		/* If the reported head position has wrapped or hasn't advanced,
-		 * fallback to the slow and accurate path.
-		 */
-		head = intel_read_status_page(ring, 4);
-		if (head < ring->actual_head)
-			head = I915_READ_HEAD(ring);
-		ring->actual_head = head;
-		ring->head = head & HEAD_ADDR;
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->size;
+		ring->head = I915_READ_HEAD(ring);
+		ring->space = ring_space(ring);
 		if (ring->space >= n) {
 			trace_i915_ring_wait_end(dev);
 			return 0;
@@ -1051,22 +1059,25 @@
 }
 
 static int gen6_ring_flush(struct intel_ring_buffer *ring,
-			   u32 invalidate_domains,
-			   u32 flush_domains)
+			   u32 invalidate, u32 flush)
 {
+	uint32_t cmd;
 	int ret;
 
-	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+	if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
 	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_FLUSH_DW);
+	cmd = MI_FLUSH_DW;
+	if (invalidate & I915_GEM_GPU_DOMAINS)
+		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 	return 0;
 }
@@ -1222,22 +1233,25 @@
 }
 
 static int blt_ring_flush(struct intel_ring_buffer *ring,
-			   u32 invalidate_domains,
-			   u32 flush_domains)
+			  u32 invalidate, u32 flush)
 {
+	uint32_t cmd;
 	int ret;
 
-	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+	if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
 		return 0;
 
 	ret = blt_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_FLUSH_DW);
+	cmd = MI_FLUSH_DW;
+	if (invalidate & I915_GEM_DOMAIN_RENDER)
+		cmd |= MI_INVALIDATE_TLB;
+	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 	return 0;
 }
@@ -1291,6 +1305,48 @@
 	return intel_init_ring_buffer(dev, ring);
 }
 
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+	*ring = render_ring;
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ring->add_request = gen6_add_request;
+		ring->irq_get = gen6_render_ring_get_irq;
+		ring->irq_put = gen6_render_ring_put_irq;
+	} else if (IS_GEN5(dev)) {
+		ring->add_request = pc_render_add_request;
+		ring->get_seqno = pc_render_get_seqno;
+	}
+
+	ring->dev = dev;
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->gpu_write_list);
+
+	ring->size = size;
+	ring->effective_size = ring->size;
+	if (IS_I830(ring->dev))
+		ring->effective_size -= 128;
+
+	ring->map.offset = start;
+	ring->map.size = size;
+	ring->map.type = 0;
+	ring->map.flags = 0;
+	ring->map.mtrr = 0;
+
+	drm_core_ioremap_wc(&ring->map, dev);
+	if (ring->map.handle == NULL) {
+		DRM_ERROR("can not ioremap virtual address for"
+			  " ring buffer\n");
+		return -ENOMEM;
+	}
+
+	ring->virtual_start = (void __force __iomem *)ring->map.handle;
+	return 0;
+}
+
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index be9087e..6d6fde8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -47,7 +47,6 @@
 	struct		drm_device *dev;
 	struct		drm_i915_gem_object *obj;
 
-	u32		actual_head;
 	u32		head;
 	u32		tail;
 	int		space;
@@ -167,4 +166,7 @@
 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
 
+/* DRI warts */
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
+
 #endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 45cd376..7c50cdc 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -46,6 +46,7 @@
                          SDVO_TV_MASK)
 
 #define IS_TV(c)	(c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c)	(c->output_flag & SDVO_TMDS_MASK)
 #define IS_LVDS(c)	(c->output_flag & SDVO_LVDS_MASK)
 #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
 
@@ -473,20 +474,6 @@
 		return false;
 	}
 
-	i = 3;
-	while (status == SDVO_CMD_STATUS_PENDING && i--) {
-		if (!intel_sdvo_read_byte(intel_sdvo,
-					  SDVO_I2C_CMD_STATUS,
-					  &status))
-			return false;
-	}
-	if (status != SDVO_CMD_STATUS_SUCCESS) {
-		DRM_DEBUG_KMS("command returns response %s [%d]\n",
-			      status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP ? cmd_status_names[status] : "???",
-			      status);
-		return false;
-	}
-
 	return true;
 }
 
@@ -497,6 +484,8 @@
 	u8 status;
 	int i;
 
+	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
+
 	/*
 	 * The documentation states that all commands will be
 	 * processed within 15µs, and that we need only poll
@@ -505,14 +494,19 @@
 	 *
 	 * Check 5 times in case the hardware failed to read the docs.
 	 */
-	do {
+	if (!intel_sdvo_read_byte(intel_sdvo,
+				  SDVO_I2C_CMD_STATUS,
+				  &status))
+		goto log_fail;
+
+	while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+		udelay(15);
 		if (!intel_sdvo_read_byte(intel_sdvo,
 					  SDVO_I2C_CMD_STATUS,
 					  &status))
-			return false;
-	} while (status == SDVO_CMD_STATUS_PENDING && --retry);
+			goto log_fail;
+	}
 
-	DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(intel_sdvo));
 	if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
 		DRM_LOG_KMS("(%s)", cmd_status_names[status]);
 	else
@@ -533,7 +527,7 @@
 	return true;
 
 log_fail:
-	DRM_LOG_KMS("\n");
+	DRM_LOG_KMS("... failed\n");
 	return false;
 }
 
@@ -550,6 +544,7 @@
 static bool intel_sdvo_set_control_bus_switch(struct intel_sdvo *intel_sdvo,
 					      u8 ddc_bus)
 {
+	/* This must be the immediately preceding write before the i2c xfer */
 	return intel_sdvo_write_cmd(intel_sdvo,
 				    SDVO_CMD_SET_CONTROL_BUS_SWITCH,
 				    &ddc_bus, 1);
@@ -557,7 +552,10 @@
 
 static bool intel_sdvo_set_value(struct intel_sdvo *intel_sdvo, u8 cmd, const void *data, int len)
 {
-	return intel_sdvo_write_cmd(intel_sdvo, cmd, data, len);
+	if (!intel_sdvo_write_cmd(intel_sdvo, cmd, data, len))
+		return false;
+
+	return intel_sdvo_read_response(intel_sdvo, NULL, 0);
 }
 
 static bool
@@ -859,18 +857,21 @@
 
 	intel_dip_infoframe_csum(&avi_if);
 
-	if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_INDEX,
+	if (!intel_sdvo_set_value(intel_sdvo,
+				  SDVO_CMD_SET_HBUF_INDEX,
 				  set_buf_index, 2))
 		return false;
 
 	for (i = 0; i < sizeof(avi_if); i += 8) {
-		if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_DATA,
+		if (!intel_sdvo_set_value(intel_sdvo,
+					  SDVO_CMD_SET_HBUF_DATA,
 					  data, 8))
 			return false;
 		data++;
 	}
 
-	return intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_HBUF_TXRATE,
+	return intel_sdvo_set_value(intel_sdvo,
+				    SDVO_CMD_SET_HBUF_TXRATE,
 				    &tx_rate, 1);
 }
 
@@ -1359,7 +1360,8 @@
 				intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
 				intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
 			}
-		}
+		} else
+			status = connector_status_disconnected;
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
@@ -1407,10 +1409,25 @@
 
 	if ((intel_sdvo_connector->output_flag & response) == 0)
 		ret = connector_status_disconnected;
-	else if (response & SDVO_TMDS_MASK)
+	else if (IS_TMDS(intel_sdvo_connector))
 		ret = intel_sdvo_hdmi_sink_detect(connector);
-	else
-		ret = connector_status_connected;
+	else {
+		struct edid *edid;
+
+		/* if we have an edid check it matches the connection */
+		edid = intel_sdvo_get_edid(connector);
+		if (edid == NULL)
+			edid = intel_sdvo_get_analog_edid(connector);
+		if (edid != NULL) {
+			if (edid->input & DRM_EDID_INPUT_DIGITAL)
+				ret = connector_status_disconnected;
+			else
+				ret = connector_status_connected;
+			connector->display_info.raw_edid = NULL;
+			kfree(edid);
+		} else
+			ret = connector_status_connected;
+	}
 
 	/* May update encoder flag for like clock for SDVO TV, etc.*/
 	if (ret == connector_status_connected) {
@@ -1446,10 +1463,15 @@
 		edid = intel_sdvo_get_analog_edid(connector);
 
 	if (edid != NULL) {
-		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+		struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
+		bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+		bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
+
+		if (connector_is_digital == monitor_is_digital) {
 			drm_mode_connector_update_edid_property(connector, edid);
 			drm_add_edid_modes(connector, edid);
 		}
+
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}
@@ -1668,6 +1690,22 @@
 	kfree(connector);
 }
 
+static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
+	struct edid *edid;
+	bool has_audio = false;
+
+	if (!intel_sdvo->is_hdmi)
+		return false;
+
+	edid = intel_sdvo_get_edid(connector);
+	if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+		has_audio = drm_detect_monitor_audio(edid);
+
+	return has_audio;
+}
+
 static int
 intel_sdvo_set_property(struct drm_connector *connector,
 			struct drm_property *property,
@@ -1684,17 +1722,23 @@
 		return ret;
 
 	if (property == intel_sdvo_connector->force_audio_property) {
-		if (val == intel_sdvo_connector->force_audio)
+		int i = val;
+		bool has_audio;
+
+		if (i == intel_sdvo_connector->force_audio)
 			return 0;
 
-		intel_sdvo_connector->force_audio = val;
+		intel_sdvo_connector->force_audio = i;
 
-		if (val > 0 && intel_sdvo->has_hdmi_audio)
-			return 0;
-		if (val < 0 && !intel_sdvo->has_hdmi_audio)
+		if (i == 0)
+			has_audio = intel_sdvo_detect_hdmi_audio(connector);
+		else
+			has_audio = i > 0;
+
+		if (has_audio == intel_sdvo->has_hdmi_audio)
 			return 0;
 
-		intel_sdvo->has_hdmi_audio = val > 0;
+		intel_sdvo->has_hdmi_audio = has_audio;
 		goto done;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 93206e4..fe4a53a 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1234,7 +1234,8 @@
  * \return false if TV is disconnected.
  */
 static int
-intel_tv_detect_type (struct intel_tv *intel_tv)
+intel_tv_detect_type (struct intel_tv *intel_tv,
+		      struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = &intel_tv->base.base;
 	struct drm_device *dev = encoder->dev;
@@ -1245,11 +1246,13 @@
 	int type;
 
 	/* Disable TV interrupts around load detect or we'll recurse */
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_disable_pipestat(dev_priv, 0,
-			      PIPE_HOTPLUG_INTERRUPT_ENABLE |
-			      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		i915_disable_pipestat(dev_priv, 0,
+				      PIPE_HOTPLUG_INTERRUPT_ENABLE |
+				      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
 
 	save_tv_dac = tv_dac = I915_READ(TV_DAC);
 	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1302,11 +1305,13 @@
 	I915_WRITE(TV_CTL, save_tv_ctl);
 
 	/* Restore interrupt config */
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, 0,
-			     PIPE_HOTPLUG_INTERRUPT_ENABLE |
-			     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+		i915_enable_pipestat(dev_priv, 0,
+				     PIPE_HOTPLUG_INTERRUPT_ENABLE |
+				     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+	}
 
 	return type;
 }
@@ -1356,7 +1361,7 @@
 	drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
 
 	if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
-		type = intel_tv_detect_type(intel_tv);
+		type = intel_tv_detect_type(intel_tv, connector);
 	} else if (force) {
 		struct drm_crtc *crtc;
 		int dpms_mode;
@@ -1364,7 +1369,7 @@
 		crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
 						  &mode, &dpms_mode);
 		if (crtc) {
-			type = intel_tv_detect_type(intel_tv);
+			type = intel_tv_detect_type(intel_tv, connector);
 			intel_release_load_detect_pipe(&intel_tv->base, connector,
 						       dpms_mode);
 		} else
@@ -1658,6 +1663,18 @@
 	intel_encoder = &intel_tv->base;
 	connector = &intel_connector->base;
 
+	/* The documentation, for the older chipsets at least, recommends
+	 * using a polling method rather than hotplug detection for TVs.
+	 * This is because in order to perform the hotplug detection, the PLLs
+	 * for the TV must be kept alive, increasing power drain and starving
+	 * bandwidth from other encoders. Notably, it causes
+	 * pipe underruns on Crestline when this encoder is supposedly idle.
+	 *
+	 * More recent chipsets favour HDMI rather than integrated S-Video.
+	 */
+	connector->polled =
+		DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
 	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
 			   DRM_MODE_CONNECTOR_SVIDEO);
 
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 21d6c29..de70959 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -8,7 +8,7 @@
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
 	select FB
-	select FRAMEBUFFER_CONSOLE if !EMBEDDED
+	select FRAMEBUFFER_CONSOLE if !EXPERT
 	select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
 	select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
 	help
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 2aef5cd..6bdab89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6228,7 +6228,7 @@
 		entry->tvconf.has_component_output = false;
 		break;
 	case OUTPUT_LVDS:
-		if ((conn & 0x00003f00) != 0x10)
+		if ((conn & 0x00003f00) >> 8 != 0x10)
 			entry->lvdsconf.use_straps_for_mode = true;
 		entry->lvdsconf.use_power_scripts = true;
 		break;
@@ -6310,6 +6310,9 @@
 static bool
 apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct dcb_table *dcb = &dev_priv->vbios.dcb;
+
 	/* Dell Precision M6300
 	 *   DCB entry 2: 02025312 00000010
 	 *   DCB entry 3: 02026312 00000020
@@ -6327,6 +6330,18 @@
 			return false;
 	}
 
+	/* GeForce3 Ti 200
+	 *
+	 * DCB reports an LVDS output that should be TMDS:
+	 *   DCB entry 1: f2005014 ffffffff
+	 */
+	if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
+		if (*conn == 0xf2005014 && *conf == 0xffffffff) {
+			fabricate_dcb_output(dcb, OUTPUT_TMDS, 1, 1, 1);
+			return false;
+		}
+	}
+
 	return true;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a7fae26..d38a4d9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -128,6 +128,7 @@
 		}
 	}
 
+	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
 	nouveau_bo_placement_set(nvbo, flags, 0);
 
 	nvbo->channel = chan;
@@ -166,17 +167,17 @@
 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
 
 	if (dev_priv->card_type == NV_10 &&
-	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
+	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
+	    nvbo->bo.mem.num_pages < vram_pages / 2) {
 		/*
 		 * Make sure that the color and depth buffers are handled
 		 * by independent memory controller units. Up to a 9x
 		 * speed up when alpha-blending and depth-test are enabled
 		 * at the same time.
 		 */
-		int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
-
 		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
 			nvbo->placement.fpfn = vram_pages / 2;
 			nvbo->placement.lpfn = ~0;
@@ -785,7 +786,7 @@
 	if (ret)
 		goto out;
 
-	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
 out:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return ret;
@@ -811,11 +812,11 @@
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
+	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
 	if (ret)
 		goto out;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index a21e000..390d82c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -507,6 +507,7 @@
 	int high_w = 0, high_h = 0, high_v = 0;
 
 	list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
+		mode->vrefresh = drm_mode_vrefresh(mode);
 		if (helper->mode_valid(connector, mode) != MODE_OK ||
 		    (mode->flags & DRM_MODE_FLAG_INTERLACE))
 			continue;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 13bb672..f658a04 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -234,9 +234,9 @@
 		pci_set_power_state(pdev, PCI_D3hot);
 	}
 
-	acquire_console_sem();
+	console_lock();
 	nouveau_fbcon_set_suspend(dev, 1);
-	release_console_sem();
+	console_unlock();
 	nouveau_fbcon_restore_accel(dev);
 	return 0;
 
@@ -359,9 +359,9 @@
 		nv_crtc->lut.depth = 0;
 	}
 
-	acquire_console_sem();
+	console_lock();
 	nouveau_fbcon_set_suspend(dev, 0);
-	release_console_sem();
+	console_unlock();
 
 	nouveau_fbcon_zfill_all(dev);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 01bffc4..9821fca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -848,9 +848,6 @@
 				     struct nouveau_fence *fence);
 extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
 
-/* nvc0_vram.c */
-extern const struct ttm_mem_type_manager_func nvc0_vram_manager;
-
 /* nouveau_notifier.c */
 extern int  nouveau_notifier_init_channel(struct nouveau_channel *);
 extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index fb846a3..4399e2f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -443,7 +443,7 @@
 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
 
 	if (pm->hwmon) {
-		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
+		sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
 		hwmon_device_unregister(pm->hwmon);
 	}
 #endif
@@ -543,7 +543,7 @@
 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
 	struct nouveau_pm_level *perflvl;
 
-	if (pm->cur == &pm->boot)
+	if (!pm->cur || pm->cur == &pm->boot)
 		return;
 
 	perflvl = pm->cur;
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 7ecc4ad..8d9968e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -265,8 +265,8 @@
 	struct i2c_board_info info[] = {
 		{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
 		{ I2C_BOARD_INFO("w83781d", 0x2d) },
-		{ I2C_BOARD_INFO("f75375", 0x2e) },
 		{ I2C_BOARD_INFO("adt7473", 0x2e) },
+		{ I2C_BOARD_INFO("f75375", 0x2e) },
 		{ I2C_BOARD_INFO("lm99", 0x4c) },
 		{ }
 	};
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index ef23550..c82db37 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -342,8 +342,8 @@
 	if (nv_encoder->dcb->type == OUTPUT_LVDS) {
 		bool duallink, dummy;
 
-		nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode->
-					      clock, &duallink, &dummy);
+		nouveau_bios_parse_lvds_table(dev, output_mode->clock,
+					      &duallink, &dummy);
 		if (duallink)
 			regp->fp_control |= (8 << 28);
 	} else
@@ -518,8 +518,6 @@
 		return;
 
 	if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
-		struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
-
 		/* when removing an output, crtc may not be set, but PANEL_OFF
 		 * must still be run
 		 */
@@ -527,12 +525,8 @@
 			   nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
 
 		if (mode == DRM_MODE_DPMS_ON) {
-			if (!nv_connector->native_mode) {
-				NV_ERROR(dev, "Not turning on LVDS without native mode\n");
-				return;
-			}
 			call_lvds_script(dev, nv_encoder->dcb, head,
-					 LVDS_PANEL_ON, nv_connector->native_mode->clock);
+					 LVDS_PANEL_ON, nv_encoder->mode.clock);
 		} else
 			/* pxclk of 0 is fine for PANEL_OFF, and for a
 			 * disconnected LVDS encoder there is no native_mode
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 8870d72..18d30c2 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -211,30 +211,35 @@
 	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
 	switch (dev_priv->chipset) {
-	case 0x44:
-	case 0x4a:
+	case 0x40:
+	case 0x41: /* guess */
+	case 0x42:
+	case 0x43:
+	case 0x45: /* guess */
 	case 0x4e:
 		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
 		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
 		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
-		break;
-
-	case 0x46:
-	case 0x47:
-	case 0x49:
-	case 0x4b:
-		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
-		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
-		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
 		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
 		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
 		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
 		break;
-
-	default:
+	case 0x44:
+	case 0x4a:
 		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
 		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
 		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+		break;
+	case 0x46:
+	case 0x47:
+	case 0x49:
+	case 0x4b:
+	case 0x4c:
+	case 0x67:
+	default:
+		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
 		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
 		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
 		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
@@ -396,17 +401,20 @@
 		break;
 	default:
 		switch (dev_priv->chipset) {
-		case 0x46:
-		case 0x47:
-		case 0x49:
-		case 0x4b:
-			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
-			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
-			break;
-		default:
+		case 0x41:
+		case 0x42:
+		case 0x43:
+		case 0x45:
+		case 0x4e:
+		case 0x44:
+		case 0x4a:
 			nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
 			nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
 			break;
+		default:
+			nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
+			nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
+			break;
 		}
 		nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
 		nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 14e24e9..0ea090f 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -283,8 +283,7 @@
 			nv50_evo_channel_del(&dev_priv->evo);
 			return ret;
 		}
-	} else
-	if (dev_priv->chipset != 0x50) {
+	} else {
 		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
 					  0, 0xffffffff, 0x00010000);
 		if (ret) {
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 2d7ea75..37e21d2 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -256,6 +256,7 @@
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
 	unsigned long flags;
 
@@ -265,6 +266,7 @@
 		return;
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pfifo->reassign(dev, false);
 	pgraph->fifo_access(dev, false);
 
 	if (pgraph->channel(dev) == chan)
@@ -275,6 +277,7 @@
 	dev_priv->engine.instmem.flush(dev);
 
 	pgraph->fifo_access(dev, true);
+	pfifo->reassign(dev, true);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
 	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 38e523e..459ff08 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -45,11 +45,6 @@
 	}
 
 	if (phys & 1) {
-		if (dev_priv->vram_sys_base) {
-			phys += dev_priv->vram_sys_base;
-			phys |= 0x30;
-		}
-
 		if (coverage <= 32 * 1024 * 1024)
 			phys |= 0x60;
 		else if (coverage <= 64 * 1024 * 1024)
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index e6ea7d8..eb18a7e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -31,6 +31,7 @@
 #include "nvc0_graph.h"
 
 static void nvc0_graph_isr(struct drm_device *);
+static void nvc0_runk140_isr(struct drm_device *);
 static int  nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan);
 
 void
@@ -281,6 +282,7 @@
 		return;
 
 	nouveau_irq_unregister(dev, 12);
+	nouveau_irq_unregister(dev, 25);
 
 	nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
 	nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
@@ -390,6 +392,7 @@
 	}
 
 	nouveau_irq_register(dev, 12, nvc0_graph_isr);
+	nouveau_irq_register(dev, 25, nvc0_runk140_isr);
 	NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
 	NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
 	NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
@@ -512,8 +515,8 @@
 			nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
 			nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
 			nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
-			nv_wr32(dev, TP_UNIT(gpc, tp, 0xe44), 0x001ffffe);
-			nv_wr32(dev, TP_UNIT(gpc, tp, 0xe4c), 0x0000000f);
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0x644), 0x001ffffe);
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0x64c), 0x0000000f);
 		}
 		nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
 		nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
@@ -777,3 +780,19 @@
 
 	nv_wr32(dev, 0x400500, 0x00010001);
 }
+
+static void
+nvc0_runk140_isr(struct drm_device *dev)
+{
+	u32 units = nv_rd32(dev, 0x00017c) & 0x1f;
+
+	while (units) {
+		u32 unit = ffs(units) - 1;
+		u32 reg = 0x140000 + unit * 0x2000;
+		u32 st0 = nv_mask(dev, reg + 0x1020, 0, 0);
+		u32 st1 = nv_mask(dev, reg + 0x1420, 0, 0);
+
+		NV_INFO(dev, "PRUNK140: %d 0x%08x 0x%08x\n", unit, st0, st1);
+		units &= ~(1 << unit);
+	}
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index b9e68b2..f880ff7 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1830,7 +1830,7 @@
 
 	for (tp = 0, id = 0; tp < 4; tp++) {
 		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-			if (tp <= priv->tp_nr[gpc]) {
+			if (tp < priv->tp_nr[gpc]) {
 				nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
 				nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
 				nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b0ab185..a4e5e53 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -48,29 +48,29 @@
 
 	switch (radeon_crtc->rmx_type) {
 	case RMX_CENTER:
-		args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
-		args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
-		args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
-		args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
+		args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
+		args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
+		args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
 		break;
 	case RMX_ASPECT:
 		a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
 		a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
 
 		if (a1 > a2) {
-			args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
-			args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2;
+			args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
+			args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
 		} else if (a2 > a1) {
-			args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
-			args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
+			args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
+			args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
 		}
 		break;
 	case RMX_FULL:
 	default:
-		args.usOverscanRight = radeon_crtc->h_border;
-		args.usOverscanLeft = radeon_crtc->h_border;
-		args.usOverscanBottom = radeon_crtc->v_border;
-		args.usOverscanTop = radeon_crtc->v_border;
+		args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
+		args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
+		args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
+		args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
 		break;
 	}
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
@@ -419,23 +419,23 @@
 	memset(&args, 0, sizeof(args));
 
 	if (ASIC_IS_DCE5(rdev)) {
-		args.v3.usSpreadSpectrumAmountFrac = 0;
+		args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
 		args.v3.ucSpreadSpectrumType = ss->type;
 		switch (pll_id) {
 		case ATOM_PPLL1:
 			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
-			args.v3.usSpreadSpectrumAmount = ss->amount;
-			args.v3.usSpreadSpectrumStep = ss->step;
+			args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_PPLL2:
 			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
-			args.v3.usSpreadSpectrumAmount = ss->amount;
-			args.v3.usSpreadSpectrumStep = ss->step;
+			args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_DCPLL:
 			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
-			args.v3.usSpreadSpectrumAmount = 0;
-			args.v3.usSpreadSpectrumStep = 0;
+			args.v3.usSpreadSpectrumAmount = cpu_to_le16(0);
+			args.v3.usSpreadSpectrumStep = cpu_to_le16(0);
 			break;
 		case ATOM_PPLL_INVALID:
 			return;
@@ -447,18 +447,18 @@
 		switch (pll_id) {
 		case ATOM_PPLL1:
 			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
-			args.v2.usSpreadSpectrumAmount = ss->amount;
-			args.v2.usSpreadSpectrumStep = ss->step;
+			args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_PPLL2:
 			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
-			args.v2.usSpreadSpectrumAmount = ss->amount;
-			args.v2.usSpreadSpectrumStep = ss->step;
+			args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
+			args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
 			break;
 		case ATOM_DCPLL:
 			args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
-			args.v2.usSpreadSpectrumAmount = 0;
-			args.v2.usSpreadSpectrumStep = 0;
+			args.v2.usSpreadSpectrumAmount = cpu_to_le16(0);
+			args.v2.usSpreadSpectrumStep = cpu_to_le16(0);
 			break;
 		case ATOM_PPLL_INVALID:
 			return;
@@ -538,7 +538,6 @@
 			pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
 		else
 			pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
-
 	}
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -555,23 +554,28 @@
 					dp_clock = dig_connector->dp_clock;
 				}
 			}
-#if 0 /* doesn't work properly on some laptops */
+
 			/* use recommended ref_div for ss */
 			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 				if (ss_enabled) {
 					if (ss->refdiv) {
+						pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
 						pll->flags |= RADEON_PLL_USE_REF_DIV;
 						pll->reference_div = ss->refdiv;
+						if (ASIC_IS_AVIVO(rdev))
+							pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 					}
 				}
 			}
-#endif
+
 			if (ASIC_IS_AVIVO(rdev)) {
 				/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
 				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
 					adjusted_clock = mode->clock * 2;
 				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
 					pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
+				if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+					pll->flags |= RADEON_PLL_IS_LCD;
 			} else {
 				if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
 					pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -606,14 +610,9 @@
 				args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
 				args.v1.ucTransmitterID = radeon_encoder->encoder_id;
 				args.v1.ucEncodeMode = encoder_mode;
-				if (encoder_mode == ATOM_ENCODER_MODE_DP) {
-					if (ss_enabled)
-						args.v1.ucConfig |=
-							ADJUST_DISPLAY_CONFIG_SS_ENABLE;
-				} else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
+				if (ss_enabled)
 					args.v1.ucConfig |=
 						ADJUST_DISPLAY_CONFIG_SS_ENABLE;
-				}
 
 				atom_execute_table(rdev->mode_info.atom_context,
 						   index, (uint32_t *)&args);
@@ -624,12 +623,12 @@
 				args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
 				args.v3.sInput.ucEncodeMode = encoder_mode;
 				args.v3.sInput.ucDispPllConfig = 0;
+				if (ss_enabled)
+					args.v3.sInput.ucDispPllConfig |=
+						DISPPLL_CONFIG_SS_ENABLE;
 				if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
 					struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 					if (encoder_mode == ATOM_ENCODER_MODE_DP) {
-						if (ss_enabled)
-							args.v3.sInput.ucDispPllConfig |=
-								DISPPLL_CONFIG_SS_ENABLE;
 						args.v3.sInput.ucDispPllConfig |=
 							DISPPLL_CONFIG_COHERENT_MODE;
 						/* 16200 or 27000 */
@@ -649,18 +648,11 @@
 					}
 				} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
 					if (encoder_mode == ATOM_ENCODER_MODE_DP) {
-						if (ss_enabled)
-							args.v3.sInput.ucDispPllConfig |=
-								DISPPLL_CONFIG_SS_ENABLE;
 						args.v3.sInput.ucDispPllConfig |=
 							DISPPLL_CONFIG_COHERENT_MODE;
 						/* 16200 or 27000 */
 						args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
-					} else if (encoder_mode == ATOM_ENCODER_MODE_LVDS) {
-						if (ss_enabled)
-							args.v3.sInput.ucDispPllConfig |=
-								DISPPLL_CONFIG_SS_ENABLE;
-					} else {
+					} else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) {
 						if (mode->clock > 165000)
 							args.v3.sInput.ucDispPllConfig |=
 								DISPPLL_CONFIG_DUAL_LINK;
@@ -670,10 +662,12 @@
 						   index, (uint32_t *)&args);
 				adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
 				if (args.v3.sOutput.ucRefDiv) {
+					pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 					pll->flags |= RADEON_PLL_USE_REF_DIV;
 					pll->reference_div = args.v3.sOutput.ucRefDiv;
 				}
 				if (args.v3.sOutput.ucPostDiv) {
+					pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 					pll->flags |= RADEON_PLL_USE_POST_DIV;
 					pll->post_div = args.v3.sOutput.ucPostDiv;
 				}
@@ -727,14 +721,14 @@
 			 * SetPixelClock provides the dividers
 			 */
 			args.v5.ucCRTC = ATOM_CRTC_INVALID;
-			args.v5.usPixelClock = dispclk;
+			args.v5.usPixelClock = cpu_to_le16(dispclk);
 			args.v5.ucPpll = ATOM_DCPLL;
 			break;
 		case 6:
 			/* if the default dcpll clock is specified,
 			 * SetPixelClock provides the dividers
 			 */
-			args.v6.ulDispEngClkFreq = dispclk;
+			args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
 			args.v6.ucPpll = ATOM_DCPLL;
 			break;
 		default:
@@ -963,8 +957,12 @@
 	/* adjust pixel clock as needed */
 	adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
 
-	radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
-			   &ref_div, &post_div);
+	if (ASIC_IS_AVIVO(rdev))
+		radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+					 &ref_div, &post_div);
+	else
+		radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+					  &ref_div, &post_div);
 
 	atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
 
@@ -993,9 +991,9 @@
 	}
 }
 
-static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
-				      struct drm_framebuffer *fb,
-				      int x, int y, int atomic)
+static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 int x, int y, int atomic)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -1006,6 +1004,7 @@
 	struct radeon_bo *rbo;
 	uint64_t fb_location;
 	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+	u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
 	int r;
 
 	/* no fb bound */
@@ -1057,11 +1056,17 @@
 	case 16:
 		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
 			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
 		break;
 	case 24:
 	case 32:
 		fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
 			     EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+#ifdef __BIG_ENDIAN
+		fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
 		break;
 	default:
 		DRM_ERROR("Unsupported screen depth %d\n",
@@ -1106,6 +1111,7 @@
 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
 	       (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
 	WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+	WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
 
 	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
 	WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
@@ -1127,12 +1133,6 @@
 	WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
 
-	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
-		WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
-		       EVERGREEN_INTERLEAVE_EN);
-	else
-		WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
-
 	if (!atomic && fb && fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
 		rbo = radeon_fb->obj->driver_private;
@@ -1162,6 +1162,7 @@
 	struct drm_framebuffer *target_fb;
 	uint64_t fb_location;
 	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+	u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
 	int r;
 
 	/* no fb bound */
@@ -1215,12 +1216,18 @@
 		fb_format =
 		    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
 		    AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
+#ifdef __BIG_ENDIAN
+		fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
 		break;
 	case 24:
 	case 32:
 		fb_format =
 		    AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
 		    AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
+#ifdef __BIG_ENDIAN
+		fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
+#endif
 		break;
 	default:
 		DRM_ERROR("Unsupported screen depth %d\n",
@@ -1260,6 +1267,8 @@
 	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS +
 	       radeon_crtc->crtc_offset, (u32) fb_location);
 	WREG32(AVIVO_D1GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+	if (rdev->family >= CHIP_R600)
+		WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
 
 	WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
 	WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
@@ -1281,12 +1290,6 @@
 	WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
 	       (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
 
-	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
-		WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
-		       AVIVO_D1MODE_INTERLEAVE_EN);
-	else
-		WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
-
 	if (!atomic && fb && fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(fb);
 		rbo = radeon_fb->obj->driver_private;
@@ -1310,7 +1313,7 @@
 	struct radeon_device *rdev = dev->dev_private;
 
 	if (ASIC_IS_DCE4(rdev))
-		return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0);
+		return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
 	else if (ASIC_IS_AVIVO(rdev))
 		return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
 	else
@@ -1325,7 +1328,7 @@
        struct radeon_device *rdev = dev->dev_private;
 
 	if (ASIC_IS_DCE4(rdev))
-		return evergreen_crtc_do_set_base(crtc, fb, x, y, 1);
+		return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
 	else if (ASIC_IS_AVIVO(rdev))
 		return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
 	else
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 4e7778d..695de9a 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -187,9 +187,9 @@
 int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
 {
 	int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
-	int bw = dp_lanes_for_mode_clock(dpcd, mode_clock);
+	int dp_clock = dp_link_clock_for_mode_clock(dpcd, mode_clock);
 
-	if ((lanes == 0) || (bw == 0))
+	if ((lanes == 0) || (dp_clock == 0))
 		return MODE_CLOCK_HIGH;
 
 	return MODE_OK;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index a8973ac..d270b3f 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -97,26 +97,29 @@
 }
 
 /* get temperature in millidegrees */
-u32 evergreen_get_temp(struct radeon_device *rdev)
+int evergreen_get_temp(struct radeon_device *rdev)
 {
 	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
 		ASIC_T_SHIFT;
 	u32 actual_temp = 0;
 
-	if ((temp >> 10) & 1)
-		actual_temp = 0;
-	else if ((temp >> 9) & 1)
+	if (temp & 0x400)
+		actual_temp = -256;
+	else if (temp & 0x200)
 		actual_temp = 255;
-	else
-		actual_temp = (temp >> 1) & 0xff;
+	else if (temp & 0x100) {
+		actual_temp = temp & 0x1ff;
+		actual_temp |= ~0x1ff;
+	} else
+		actual_temp = temp & 0xff;
 
-	return actual_temp * 1000;
+	return (actual_temp * 1000) / 2;
 }
 
-u32 sumo_get_temp(struct radeon_device *rdev)
+int sumo_get_temp(struct radeon_device *rdev)
 {
 	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
-	u32 actual_temp = (temp >> 1) & 0xff;
+	int actual_temp = temp - 49;
 
 	return actual_temp * 1000;
 }
@@ -1182,6 +1185,22 @@
 /*
  * CP.
  */
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	/* set to DX10/11 mode */
+	radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(rdev, 1);
+	/* FIXME: implement */
+	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(rdev, ib->length_dw);
+}
+
 
 static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 {
@@ -1192,7 +1211,11 @@
 		return -EINVAL;
 
 	r700_cp_stop(rdev);
-	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
 	fw_data = (const __be32 *)rdev->pfp_fw->data;
 	WREG32(CP_PFP_UCODE_ADDR, 0);
@@ -1233,7 +1256,7 @@
 	cp_me = 0xff;
 	WREG32(CP_ME_CNTL, cp_me);
 
-	r = radeon_ring_lock(rdev, evergreen_default_size + 15);
+	r = radeon_ring_lock(rdev, evergreen_default_size + 19);
 	if (r) {
 		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
 		return r;
@@ -1266,6 +1289,11 @@
 	radeon_ring_write(rdev, 0xffffffff);
 	radeon_ring_write(rdev, 0xffffffff);
 
+	radeon_ring_write(rdev, 0xc0026900);
+	radeon_ring_write(rdev, 0x00000316);
+	radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+	radeon_ring_write(rdev, 0x00000010); /*  */
+
 	radeon_ring_unlock_commit(rdev);
 
 	return 0;
@@ -1306,7 +1334,11 @@
 	WREG32(CP_RB_WPTR, 0);
 
 	/* set the wb address whether it's enabled or not */
-	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+	       RB_RPTR_SWAP(2) |
+#endif
+	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
 
@@ -2072,6 +2104,7 @@
 	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
 
 	WREG32(VGT_GS_VERTEX_REUSE, 16);
+	WREG32(PA_SU_LINE_STIPPLE_VALUE, 0);
 	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
 
 	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
@@ -2201,6 +2234,9 @@
 	struct evergreen_mc_save save;
 	u32 grbm_reset = 0;
 
+	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+		return 0;
+
 	dev_info(rdev->dev, "GPU softreset \n");
 	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
 		RREG32(GRBM_STATUS));
@@ -2603,8 +2639,8 @@
 	while (rptr != wptr) {
 		/* wptr/rptr are in bytes! */
 		ring_index = rptr / 4;
-		src_id =  rdev->ih.ring[ring_index] & 0xff;
-		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+		src_id =  le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
 
 		switch (src_id) {
 		case 1: /* D1 vblank/vline */
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index b758dc7..2adfb03 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -55,7 +55,7 @@
 	if (h < 8)
 		h = 8;
 
-	cb_color_info = ((format << 2) | (1 << 24));
+	cb_color_info = ((format << 2) | (1 << 24) | (1 << 8));
 	pitch = (w / 8) - 1;
 	slice = ((w * h) / 64) - 1;
 
@@ -133,6 +133,9 @@
 
 	/* high addr, stride */
 	sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= (2 << 30);
+#endif
 	/* xyzw swizzles */
 	sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);
 
@@ -173,7 +176,7 @@
 	sq_tex_resource_word0 = (1 << 0); /* 2D */
 	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
 				  ((w - 1) << 18));
-	sq_tex_resource_word1 = ((h - 1) << 0);
+	sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28);
 	/* xyzw swizzles */
 	sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
 
@@ -221,7 +224,11 @@
 	radeon_ring_write(rdev, DI_PT_RECTLIST);
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-	radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 2) |
+#endif
+			  DI_INDEX_SIZE_16_BIT);
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
 	radeon_ring_write(rdev, 1);
@@ -232,7 +239,7 @@
 
 }
 
-/* emits 30 */
+/* emits 36 */
 static void
 set_default_state(struct radeon_device *rdev)
 {
@@ -245,6 +252,8 @@
 	int num_hs_threads, num_ls_threads;
 	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
 	int num_hs_stack_entries, num_ls_stack_entries;
+	u64 gpu_addr;
+	int dwords;
 
 	switch (rdev->family) {
 	case CHIP_CEDAR:
@@ -497,6 +506,18 @@
 	radeon_ring_write(rdev, 0x00000000);
 	radeon_ring_write(rdev, 0x00000000);
 
+	/* set to DX10/11 mode */
+	radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(rdev, 1);
+
+	/* emit an IB pointing at default state */
+	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
+	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
+	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
+	radeon_ring_write(rdev, dwords);
+
 }
 
 static inline uint32_t i2f(uint32_t input)
@@ -527,8 +548,10 @@
 int evergreen_blit_init(struct radeon_device *rdev)
 {
 	u32 obj_size;
-	int r;
+	int i, r, dwords;
 	void *ptr;
+	u32 packet2s[16];
+	int num_packet2s = 0;
 
 	/* pin copy shader into vram if already initialized */
 	if (rdev->r600_blit.shader_obj)
@@ -536,8 +559,17 @@
 
 	mutex_init(&rdev->r600_blit.mutex);
 	rdev->r600_blit.state_offset = 0;
-	rdev->r600_blit.state_len = 0;
-	obj_size = 0;
+
+	rdev->r600_blit.state_len = evergreen_default_size;
+
+	dwords = rdev->r600_blit.state_len;
+	while (dwords & 0xf) {
+		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
+		dwords++;
+	}
+
+	obj_size = dwords * 4;
+	obj_size = ALIGN(obj_size, 256);
 
 	rdev->r600_blit.vs_offset = obj_size;
 	obj_size += evergreen_vs_size * 4;
@@ -567,8 +599,16 @@
 		return r;
 	}
 
-	memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4);
-	memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4);
+	memcpy_toio(ptr + rdev->r600_blit.state_offset,
+		    evergreen_default_state, rdev->r600_blit.state_len * 4);
+
+	if (num_packet2s)
+		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
+			    packet2s, num_packet2s * 4);
+	for (i = 0; i < evergreen_vs_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
+	for (i = 0; i < evergreen_ps_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
 	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
 	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 
@@ -652,7 +692,7 @@
 	/* calculate number of loops correctly */
 	ring_size = num_loops * dwords_per_loop;
 	/* set default  + shaders */
-	ring_size += 46; /* shaders + def state */
+	ring_size += 52; /* shaders + def state */
 	ring_size += 10; /* fence emit for VB IB */
 	ring_size += 5; /* done copy */
 	ring_size += 10; /* fence emit for done copy */
@@ -660,7 +700,7 @@
 	if (r)
 		return r;
 
-	set_default_state(rdev); /* 30 */
+	set_default_state(rdev); /* 36 */
 	set_shaders(rdev); /* 16 */
 	return 0;
 }
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
index ef1d28c..3a10399 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -311,11 +311,19 @@
 	0x00000000,
 	0x3c000000,
 	0x67961001,
+#ifdef __BIG_ENDIAN
+	0x000a0000,
+#else
 	0x00080000,
+#endif
 	0x00000000,
 	0x1c000000,
 	0x67961000,
+#ifdef __BIG_ENDIAN
+	0x00020008,
+#else
 	0x00000008,
+#endif
 	0x00000000,
 };
 
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 36d32d8..eb4acf4 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -98,6 +98,7 @@
 #define		BUF_SWAP_32BIT					(2 << 16)
 #define	CP_RB_RPTR					0x8700
 #define	CP_RB_RPTR_ADDR					0xC10C
+#define		RB_RPTR_SWAP(x)					((x) << 0)
 #define	CP_RB_RPTR_ADDR_HI				0xC110
 #define	CP_RB_RPTR_WR					0xC108
 #define	CP_RB_WPTR					0xC114
@@ -240,6 +241,7 @@
 #define		FORCE_EOV_MAX_CLK_CNT(x)			((x) << 0)
 #define		FORCE_EOV_MAX_REZ_CNT(x)			((x) << 16)
 #define PA_SC_LINE_STIPPLE				0x28A0C
+#define	PA_SU_LINE_STIPPLE_VALUE			0x8A60
 #define	PA_SC_LINE_STIPPLE_STATE			0x8B10
 
 #define	SCRATCH_REG0					0x8500
@@ -652,6 +654,7 @@
 #define	PACKET3_DISPATCH_DIRECT				0x15
 #define	PACKET3_DISPATCH_INDIRECT			0x16
 #define	PACKET3_INDIRECT_BUFFER_END			0x17
+#define	PACKET3_MODE_CONTROL				0x18
 #define	PACKET3_SET_PREDICATION				0x20
 #define	PACKET3_REG_RMW					0x21
 #define	PACKET3_COND_EXEC				0x22
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index 607241c..5a82b6b 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -673,8 +673,10 @@
 	last_reg = strtol(last_reg_s, NULL, 16);
 
 	do {
-		if (fgets(buf, 1024, file) == NULL)
+		if (fgets(buf, 1024, file) == NULL) {
+			fclose(file);
 			return -1;
+		}
 		len = strlen(buf);
 		if (ftell(file) == end)
 			done = 1;
@@ -685,6 +687,7 @@
 				fprintf(stderr,
 					"Error matching regular expression %d in %s\n",
 					r, filename);
+				fclose(file);
 				return -1;
 			} else {
 				buf[match[0].rm_eo] = 0;
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 46da514..56deae5 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1031,8 +1031,8 @@
 	WREG32(RADEON_CP_CSQ_MODE,
 	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
 	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
-	WREG32(0x718, 0);
-	WREG32(0x744, 0x00004D4D);
+	WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
+	WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
 	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
 	radeon_ring_start(rdev);
 	r = radeon_ring_test(rdev);
@@ -1427,6 +1427,7 @@
 		}
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
+		track->zb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_RB3D_COLOROFFSET:
@@ -1439,6 +1440,7 @@
 		}
 		track->cb[0].robj = reloc->robj;
 		track->cb[0].offset = idx_value;
+		track->cb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_PP_TXOFFSET_0:
@@ -1454,6 +1456,7 @@
 		}
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T0_0:
 	case RADEON_PP_CUBIC_OFFSET_T0_1:
@@ -1471,6 +1474,7 @@
 		track->textures[0].cube_info[i].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[0].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T1_0:
 	case RADEON_PP_CUBIC_OFFSET_T1_1:
@@ -1488,6 +1492,7 @@
 		track->textures[1].cube_info[i].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[1].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T2_0:
 	case RADEON_PP_CUBIC_OFFSET_T2_1:
@@ -1505,9 +1510,12 @@
 		track->textures[2].cube_info[i].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[2].cube_info[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_RE_WIDTH_HEIGHT:
 		track->maxy = ((idx_value >> 16) & 0x7FF);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_COLORPITCH:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1528,9 +1536,11 @@
 		ib[idx] = tmp;
 
 		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+		track->cb_dirty = true;
 		break;
 	case RADEON_RB3D_DEPTHPITCH:
 		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_CNTL:
 		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -1555,6 +1565,8 @@
 			return -EINVAL;
 		}
 		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZSTENCILCNTL:
 		switch (idx_value & 0xf) {
@@ -1572,6 +1584,7 @@
 		default:
 			break;
 		}
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZPASS_ADDR:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1588,6 +1601,7 @@
 			uint32_t temp = idx_value >> 4;
 			for (i = 0; i < track->num_texture; i++)
 				track->textures[i].enabled = !!(temp & (1 << i));
+			track->tex_dirty = true;
 		}
 		break;
 	case RADEON_SE_VF_CNTL:
@@ -1602,12 +1616,14 @@
 		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
 		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
 		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_TEX_PITCH_0:
 	case RADEON_PP_TEX_PITCH_1:
 	case RADEON_PP_TEX_PITCH_2:
 		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
 		track->textures[i].pitch = idx_value + 32;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_TXFILTER_0:
 	case RADEON_PP_TXFILTER_1:
@@ -1621,6 +1637,7 @@
 		tmp = (idx_value >> 27) & 0x7;
 		if (tmp == 2 || tmp == 6)
 			track->textures[i].roundup_h = false;
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_TXFORMAT_0:
 	case RADEON_PP_TXFORMAT_1:
@@ -1673,6 +1690,7 @@
 		}
 		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
 		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_FACES_0:
 	case RADEON_PP_CUBIC_FACES_1:
@@ -1683,6 +1701,7 @@
 			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
 			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
 		}
+		track->tex_dirty = true;
 		break;
 	default:
 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
@@ -2347,10 +2366,10 @@
 
 	temp = RREG32(RADEON_CONFIG_CNTL);
 	if (state == false) {
-		temp &= ~(1<<8);
-		temp |= (1<<9);
+		temp &= ~RADEON_CFG_VGA_RAM_EN;
+		temp |= RADEON_CFG_VGA_IO_DIS;
 	} else {
-		temp &= ~(1<<9);
+		temp &= ~RADEON_CFG_VGA_IO_DIS;
 	}
 	WREG32(RADEON_CONFIG_CNTL, temp);
 }
@@ -3318,9 +3337,9 @@
 	unsigned long size;
 	unsigned prim_walk;
 	unsigned nverts;
-	unsigned num_cb = track->num_cb;
+	unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
 
-	if (!track->zb_cb_clear && !track->color_channel_mask &&
+	if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
 	    !track->blend_read_enable)
 		num_cb = 0;
 
@@ -3341,7 +3360,9 @@
 			return -EINVAL;
 		}
 	}
-	if (track->z_enabled) {
+	track->cb_dirty = false;
+
+	if (track->zb_dirty && track->z_enabled) {
 		if (track->zb.robj == NULL) {
 			DRM_ERROR("[drm] No buffer for z buffer !\n");
 			return -EINVAL;
@@ -3358,6 +3379,28 @@
 			return -EINVAL;
 		}
 	}
+	track->zb_dirty = false;
+
+	if (track->aa_dirty && track->aaresolve) {
+		if (track->aa.robj == NULL) {
+			DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
+			return -EINVAL;
+		}
+		/* I believe the format comes from colorbuffer0. */
+		size = track->aa.pitch * track->cb[0].cpp * track->maxy;
+		size += track->aa.offset;
+		if (size > radeon_bo_size(track->aa.robj)) {
+			DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
+				  "(need %lu have %lu) !\n", i, size,
+				  radeon_bo_size(track->aa.robj));
+			DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
+				  i, track->aa.pitch, track->cb[0].cpp,
+				  track->aa.offset, track->maxy);
+			return -EINVAL;
+		}
+	}
+	track->aa_dirty = false;
+
 	prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
 	if (track->vap_vf_cntl & (1 << 14)) {
 		nverts = track->vap_alt_nverts;
@@ -3417,13 +3460,23 @@
 			  prim_walk);
 		return -EINVAL;
 	}
-	return r100_cs_track_texture_check(rdev, track);
+
+	if (track->tex_dirty) {
+		track->tex_dirty = false;
+		return r100_cs_track_texture_check(rdev, track);
+	}
+	return 0;
 }
 
 void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
 {
 	unsigned i, face;
 
+	track->cb_dirty = true;
+	track->zb_dirty = true;
+	track->tex_dirty = true;
+	track->aa_dirty = true;
+
 	if (rdev->family < CHIP_R300) {
 		track->num_cb = 1;
 		if (rdev->family <= CHIP_RS200)
@@ -3437,6 +3490,8 @@
 		track->num_texture = 16;
 		track->maxy = 4096;
 		track->separate_cube = 0;
+		track->aaresolve = true;
+		track->aa.robj = NULL;
 	}
 
 	for (i = 0; i < track->num_cb; i++) {
@@ -3522,7 +3577,7 @@
 	if (i < rdev->usec_timeout) {
 		DRM_INFO("ring test succeeded in %d usecs\n", i);
 	} else {
-		DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n",
+		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
 			  scratch, tmp);
 		r = -EINVAL;
 	}
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index af65600..2fef9de 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -52,14 +52,7 @@
 	unsigned                compress_format;
 };
 
-struct r100_cs_track_limits {
-	unsigned num_cb;
-	unsigned num_texture;
-	unsigned max_levels;
-};
-
 struct r100_cs_track {
-	struct radeon_device *rdev;
 	unsigned			num_cb;
 	unsigned                        num_texture;
 	unsigned			maxy;
@@ -73,11 +66,17 @@
 	struct r100_cs_track_array	arrays[11];
 	struct r100_cs_track_cb 	cb[R300_MAX_CB];
 	struct r100_cs_track_cb 	zb;
+	struct r100_cs_track_cb 	aa;
 	struct r100_cs_track_texture	textures[R300_TRACK_MAX_TEXTURE];
 	bool				z_enabled;
 	bool                            separate_cube;
 	bool				zb_cb_clear;
 	bool				blend_read_enable;
+	bool				cb_dirty;
+	bool				zb_dirty;
+	bool				tex_dirty;
+	bool				aa_dirty;
+	bool				aaresolve;
 };
 
 int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index d2408c3..f240583 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -184,6 +184,7 @@
 		}
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
+		track->zb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case RADEON_RB3D_COLOROFFSET:
@@ -196,6 +197,7 @@
 		}
 		track->cb[0].robj = reloc->robj;
 		track->cb[0].offset = idx_value;
+		track->cb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case R200_PP_TXOFFSET_0:
@@ -214,6 +216,7 @@
 		}
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case R200_PP_CUBIC_OFFSET_F1_0:
 	case R200_PP_CUBIC_OFFSET_F2_0:
@@ -257,9 +260,12 @@
 		track->textures[i].cube_info[face - 1].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		track->textures[i].cube_info[face - 1].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	case RADEON_RE_WIDTH_HEIGHT:
 		track->maxy = ((idx_value >> 16) & 0x7FF);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_COLORPITCH:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -280,9 +286,11 @@
 		ib[idx] = tmp;
 
 		track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
+		track->cb_dirty = true;
 		break;
 	case RADEON_RB3D_DEPTHPITCH:
 		track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_CNTL:
 		switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -312,6 +320,8 @@
 		}
 
 		track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZSTENCILCNTL:
 		switch (idx_value & 0xf) {
@@ -329,6 +339,7 @@
 		default:
 			break;
 		}
+		track->zb_dirty = true;
 		break;
 	case RADEON_RB3D_ZPASS_ADDR:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -345,6 +356,7 @@
 			uint32_t temp = idx_value >> 4;
 			for (i = 0; i < track->num_texture; i++)
 				track->textures[i].enabled = !!(temp & (1 << i));
+			track->tex_dirty = true;
 		}
 		break;
 	case RADEON_SE_VF_CNTL:
@@ -369,6 +381,7 @@
 		i = (reg - R200_PP_TXSIZE_0) / 32;
 		track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
 		track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		track->tex_dirty = true;
 		break;
 	case R200_PP_TXPITCH_0:
 	case R200_PP_TXPITCH_1:
@@ -378,6 +391,7 @@
 	case R200_PP_TXPITCH_5:
 		i = (reg - R200_PP_TXPITCH_0) / 32;
 		track->textures[i].pitch = idx_value + 32;
+		track->tex_dirty = true;
 		break;
 	case R200_PP_TXFILTER_0:
 	case R200_PP_TXFILTER_1:
@@ -394,6 +408,7 @@
 		tmp = (idx_value >> 27) & 0x7;
 		if (tmp == 2 || tmp == 6)
 			track->textures[i].roundup_h = false;
+		track->tex_dirty = true;
 		break;
 	case R200_PP_TXMULTI_CTL_0:
 	case R200_PP_TXMULTI_CTL_1:
@@ -432,6 +447,7 @@
 			track->textures[i].tex_coord_type = 1;
 			break;
 		}
+		track->tex_dirty = true;
 		break;
 	case R200_PP_TXFORMAT_0:
 	case R200_PP_TXFORMAT_1:
@@ -488,6 +504,7 @@
 		}
 		track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
 		track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
+		track->tex_dirty = true;
 		break;
 	case R200_PP_CUBIC_FACES_0:
 	case R200_PP_CUBIC_FACES_1:
@@ -501,6 +518,7 @@
 			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
 			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
 		}
+		track->tex_dirty = true;
 		break;
 	default:
 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index cf862ca..069efa8 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -69,6 +69,9 @@
 	mb();
 }
 
+#define R300_PTE_WRITEABLE (1 << 2)
+#define R300_PTE_READABLE  (1 << 3)
+
 int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 {
 	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
@@ -78,7 +81,7 @@
 	}
 	addr = (lower_32_bits(addr) >> 8) |
 	       ((upper_32_bits(addr) & 0xff) << 24) |
-	       0xc;
+	       R300_PTE_WRITEABLE | R300_PTE_READABLE;
 	/* on x86 we want this to be CPU endian, on powerpc
 	 * on powerpc without HW swappers, it'll get swapped on way
 	 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
@@ -135,7 +138,7 @@
 	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
 	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
 	/* Clear error */
-	WREG32_PCIE(0x18, 0);
+	WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
 	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
 	tmp |= RADEON_PCIE_TX_GART_EN;
 	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
@@ -664,6 +667,7 @@
 		}
 		track->cb[i].robj = reloc->robj;
 		track->cb[i].offset = idx_value;
+		track->cb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case R300_ZB_DEPTHOFFSET:
@@ -676,6 +680,7 @@
 		}
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
+		track->zb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
 		break;
 	case R300_TX_OFFSET_0:
@@ -714,6 +719,7 @@
 		tmp |= tile_flags;
 		ib[idx] = tmp;
 		track->textures[i].robj = reloc->robj;
+		track->tex_dirty = true;
 		break;
 	/* Tracked registers */
 	case 0x2084:
@@ -740,6 +746,8 @@
 		if (p->rdev->family < CHIP_RV515) {
 			track->maxy -= 1440;
 		}
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		break;
 	case 0x4E00:
 		/* RB3D_CCTL */
@@ -749,6 +757,7 @@
 			return -EINVAL;
 		}
 		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
+		track->cb_dirty = true;
 		break;
 	case 0x4E38:
 	case 0x4E3C:
@@ -811,6 +820,7 @@
 				  ((idx_value >> 21) & 0xF));
 			return -EINVAL;
 		}
+		track->cb_dirty = true;
 		break;
 	case 0x4F00:
 		/* ZB_CNTL */
@@ -819,6 +829,7 @@
 		} else {
 			track->z_enabled = false;
 		}
+		track->zb_dirty = true;
 		break;
 	case 0x4F10:
 		/* ZB_FORMAT */
@@ -835,6 +846,7 @@
 				  (idx_value & 0xF));
 			return -EINVAL;
 		}
+		track->zb_dirty = true;
 		break;
 	case 0x4F24:
 		/* ZB_DEPTHPITCH */
@@ -858,14 +870,17 @@
 		ib[idx] = tmp;
 
 		track->zb.pitch = idx_value & 0x3FFC;
+		track->zb_dirty = true;
 		break;
 	case 0x4104:
+		/* TX_ENABLE */
 		for (i = 0; i < 16; i++) {
 			bool enabled;
 
 			enabled = !!(idx_value & (1 << i));
 			track->textures[i].enabled = enabled;
 		}
+		track->tex_dirty = true;
 		break;
 	case 0x44C0:
 	case 0x44C4:
@@ -895,6 +910,7 @@
 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case R300_TX_FORMAT_X16:
+		case R300_TX_FORMAT_FL_I16:
 		case R300_TX_FORMAT_Y8X8:
 		case R300_TX_FORMAT_Z5Y6X5:
 		case R300_TX_FORMAT_Z6Y5X5:
@@ -907,6 +923,7 @@
 			track->textures[i].compress_format = R100_TRACK_COMP_NONE;
 			break;
 		case R300_TX_FORMAT_Y16X16:
+		case R300_TX_FORMAT_FL_I16A16:
 		case R300_TX_FORMAT_Z11Y11X10:
 		case R300_TX_FORMAT_Z10Y11X11:
 		case R300_TX_FORMAT_W8Z8Y8X8:
@@ -948,8 +965,8 @@
 			DRM_ERROR("Invalid texture format %u\n",
 				  (idx_value & 0x1F));
 			return -EINVAL;
-			break;
 		}
+		track->tex_dirty = true;
 		break;
 	case 0x4400:
 	case 0x4404:
@@ -977,6 +994,7 @@
 		if (tmp == 2 || tmp == 4 || tmp == 6) {
 			track->textures[i].roundup_h = false;
 		}
+		track->tex_dirty = true;
 		break;
 	case 0x4500:
 	case 0x4504:
@@ -1014,6 +1032,7 @@
 			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
 			return -EINVAL;
 		}
+		track->tex_dirty = true;
 		break;
 	case 0x4480:
 	case 0x4484:
@@ -1043,6 +1062,7 @@
 		track->textures[i].use_pitch = !!tmp;
 		tmp = (idx_value >> 22) & 0xF;
 		track->textures[i].txdepth = tmp;
+		track->tex_dirty = true;
 		break;
 	case R300_ZB_ZPASS_ADDR:
 		r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1057,6 +1077,7 @@
 	case 0x4e0c:
 		/* RB3D_COLOR_CHANNEL_MASK */
 		track->color_channel_mask = idx_value;
+		track->cb_dirty = true;
 		break;
 	case 0x43a4:
 		/* SC_HYPERZ_EN */
@@ -1070,6 +1091,8 @@
 	case 0x4f1c:
 		/* ZB_BW_CNTL */
 		track->zb_cb_clear = !!(idx_value & (1 << 5));
+		track->cb_dirty = true;
+		track->zb_dirty = true;
 		if (p->rdev->hyperz_filp != p->filp) {
 			if (idx_value & (R300_HIZ_ENABLE |
 					 R300_RD_COMP_ENABLE |
@@ -1081,8 +1104,28 @@
 	case 0x4e04:
 		/* RB3D_BLENDCNTL */
 		track->blend_read_enable = !!(idx_value & (1 << 2));
+		track->cb_dirty = true;
 		break;
-	case 0x4f28: /* ZB_DEPTHCLEARVALUE */
+	case R300_RB3D_AARESOLVE_OFFSET:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->aa.robj = reloc->robj;
+		track->aa.offset = idx_value;
+		track->aa_dirty = true;
+		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case R300_RB3D_AARESOLVE_PITCH:
+		track->aa.pitch = idx_value & 0x3FFE;
+		track->aa_dirty = true;
+		break;
+	case R300_RB3D_AARESOLVE_CTL:
+		track->aaresolve = idx_value & 0x1;
+		track->aa_dirty = true;
 		break;
 	case 0x4f30: /* ZB_MASK_OFFSET */
 	case 0x4f34: /* ZB_ZMASK_PITCH */
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 1a0d536..f0bce39 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -1371,6 +1371,8 @@
 #define R300_RB3D_COLORPITCH2               0x4E40 /* GUESS */
 #define R300_RB3D_COLORPITCH3               0x4E44 /* GUESS */
 
+#define R300_RB3D_AARESOLVE_OFFSET          0x4E80
+#define R300_RB3D_AARESOLVE_PITCH           0x4E84
 #define R300_RB3D_AARESOLVE_CTL             0x4E88
 /* gap */
 
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c387346..0b59ed7 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -96,7 +96,7 @@
 		       "programming pipes. Bad things might happen.\n");
 	}
 	/* get max number of pipes */
-	gb_pipe_select = RREG32(0x402C);
+	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
 	num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
 
 	/* SE chips have 1 pipe */
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 3c8677f..2ce80d9 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -79,8 +79,8 @@
 		WREG32(0x4128, 0xFF);
 	}
 	r420_pipes_init(rdev);
-	gb_pipe_select = RREG32(0x402C);
-	tmp = RREG32(0x170C);
+	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
 	pipe_select_current = (tmp >> 2) & 3;
 	tmp = (1 << pipe_select_current) |
 	      (((gb_pipe_select >> 8) & 0xF) << 4);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index aca2236..de88624 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -97,12 +97,16 @@
 static void r600_pcie_gen2_enable(struct radeon_device *rdev);
 
 /* get temperature in millidegrees */
-u32 rv6xx_get_temp(struct radeon_device *rdev)
+int rv6xx_get_temp(struct radeon_device *rdev)
 {
 	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
 		ASIC_T_SHIFT;
+	int actual_temp = temp & 0xff;
 
-	return temp * 1000;
+	if (temp & 0x100)
+		actual_temp -= 256;
+
+	return actual_temp * 1000;
 }
 
 void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -1287,6 +1291,9 @@
 			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
 	u32 tmp;
 
+	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
+		return 0;
+
 	dev_info(rdev->dev, "GPU softreset \n");
 	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
 		RREG32(R_008010_GRBM_STATUS));
@@ -2098,7 +2105,11 @@
 
 	r600_cp_stop(rdev);
 
-	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
 	/* Reset cp */
 	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
@@ -2185,7 +2196,11 @@
 	WREG32(CP_RB_WPTR, 0);
 
 	/* set the wb address whether it's enabled or not */
-	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
+	WREG32(CP_RB_RPTR_ADDR,
+#ifdef __BIG_ENDIAN
+	       RB_RPTR_SWAP(2) |
+#endif
+	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
 	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
 
@@ -2621,7 +2636,11 @@
 {
 	/* FIXME: implement */
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
 	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
 	radeon_ring_write(rdev, ib->length_dw);
 }
@@ -3290,8 +3309,8 @@
 	while (rptr != wptr) {
 		/* wptr/rptr are in bytes! */
 		ring_index = rptr / 4;
-		src_id =  rdev->ih.ring[ring_index] & 0xff;
-		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
+		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
+		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
 
 		switch (src_id) {
 		case 1: /* D1 vblank/vline */
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index ca5c29f..7f10434 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -137,9 +137,9 @@
 	ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
 
 	for (i = 0; i < r6xx_vs_size; i++)
-		vs[i] = r6xx_vs[i];
+		vs[i] = cpu_to_le32(r6xx_vs[i]);
 	for (i = 0; i < r6xx_ps_size; i++)
-		ps[i] = r6xx_ps[i];
+		ps[i] = cpu_to_le32(r6xx_ps[i]);
 
 	dev_priv->blit_vb->used = 512;
 
@@ -192,6 +192,9 @@
 	DRM_DEBUG("\n");
 
 	sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= (2 << 30);
+#endif
 
 	BEGIN_RING(9);
 	OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
@@ -291,7 +294,11 @@
 	OUT_RING(DI_PT_RECTLIST);
 
 	OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
+#ifdef __BIG_ENDIAN
+	OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT);
+#else
 	OUT_RING(DI_INDEX_SIZE_16_BIT);
+#endif
 
 	OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
 	OUT_RING(1);
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 86e5aa0..41f7aaf 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -54,7 +54,7 @@
 	if (h < 8)
 		h = 8;
 
-	cb_color_info = ((format << 2) | (1 << 27));
+	cb_color_info = ((format << 2) | (1 << 27) | (1 << 8));
 	pitch = (w / 8) - 1;
 	slice = ((w * h) / 64) - 1;
 
@@ -165,6 +165,9 @@
 	u32 sq_vtx_constant_word2;
 
 	sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
+#ifdef __BIG_ENDIAN
+	sq_vtx_constant_word2 |= (2 << 30);
+#endif
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
 	radeon_ring_write(rdev, 0x460);
@@ -199,7 +202,7 @@
 	if (h < 1)
 		h = 1;
 
-	sq_tex_resource_word0 = (1 << 0);
+	sq_tex_resource_word0 = (1 << 0) | (1 << 3);
 	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
 				  ((w - 1) << 19));
 
@@ -253,7 +256,11 @@
 	radeon_ring_write(rdev, DI_PT_RECTLIST);
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
-	radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 2) |
+#endif
+			  DI_INDEX_SIZE_16_BIT);
 
 	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
 	radeon_ring_write(rdev, 1);
@@ -424,7 +431,11 @@
 	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
 	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
 	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
-	radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (gpu_addr & 0xFFFFFFFC));
 	radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
 	radeon_ring_write(rdev, dwords);
 
@@ -467,7 +478,7 @@
 int r600_blit_init(struct radeon_device *rdev)
 {
 	u32 obj_size;
-	int r, dwords;
+	int i, r, dwords;
 	void *ptr;
 	u32 packet2s[16];
 	int num_packet2s = 0;
@@ -486,7 +497,7 @@
 
 	dwords = rdev->r600_blit.state_len;
 	while (dwords & 0xf) {
-		packet2s[num_packet2s++] = PACKET2(0);
+		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
 		dwords++;
 	}
 
@@ -529,8 +540,10 @@
 	if (num_packet2s)
 		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
 			    packet2s, num_packet2s * 4);
-	memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
-	memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
+	for (i = 0; i < r6xx_vs_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
+	for (i = 0; i < r6xx_ps_size; i++)
+		*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
 	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
 	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index e8151c1..2d1f6c5 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -684,7 +684,11 @@
 	0x00000000,
 	0x3c000000,
 	0x68cd1000,
+#ifdef __BIG_ENDIAN
+	0x000a0000,
+#else
 	0x00080000,
+#endif
 	0x00000000,
 };
 
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 4f4cd8b..c3ab959 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -396,6 +396,9 @@
 	r600_do_cp_stop(dev_priv);
 
 	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
 		     R600_RB_NO_UPDATE |
 		     R600_RB_BLKSZ(15) |
 		     R600_RB_BUFSZ(3));
@@ -486,9 +489,12 @@
 	r600_do_cp_stop(dev_priv);
 
 	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
 		     R600_RB_NO_UPDATE |
-		     (15 << 8) |
-		     (3 << 0));
+		     R600_RB_BLKSZ(15) |
+		     R600_RB_BUFSZ(3));
 
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
 	RADEON_READ(R600_GRBM_SOFT_RESET);
@@ -550,8 +556,12 @@
 
 	if (!dev_priv->writeback_works) {
 		/* Disable writeback to avoid unnecessary bus master transfer */
-		RADEON_WRITE(R600_CP_RB_CNTL, RADEON_READ(R600_CP_RB_CNTL) |
-			     RADEON_RB_NO_UPDATE);
+		RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+			     R600_BUF_SWAP_32BIT |
+#endif
+			     RADEON_READ(R600_CP_RB_CNTL) |
+			     R600_RB_NO_UPDATE);
 		RADEON_WRITE(R600_SCRATCH_UMSK, 0);
 	}
 }
@@ -575,7 +585,11 @@
 
 	RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
 	cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL);
-	RADEON_WRITE(R600_CP_RB_CNTL, R600_RB_RPTR_WR_ENA);
+	RADEON_WRITE(R600_CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+		     R600_BUF_SWAP_32BIT |
+#endif
+		     R600_RB_RPTR_WR_ENA);
 
 	RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr);
 	RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr);
@@ -1838,7 +1852,10 @@
 			+ dev_priv->gart_vm_start;
 	}
 	RADEON_WRITE(R600_CP_RB_RPTR_ADDR,
-		     rptr_addr & 0xffffffff);
+#ifdef __BIG_ENDIAN
+		     (2 << 0) |
+#endif
+		     (rptr_addr & 0xfffffffc));
 	RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI,
 		     upper_32_bits(rptr_addr));
 
@@ -1889,7 +1906,7 @@
 	{
 		u64 scratch_addr;
 
-		scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR);
+		scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC;
 		scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32;
 		scratch_addr += R600_SCRATCH_REG_OFFSET;
 		scratch_addr >>= 8;
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 7831e08..153095f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -295,17 +295,18 @@
 	}
 
 	if (!IS_ALIGNED(pitch, pitch_align)) {
-		dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
-			 __func__, __LINE__, pitch);
+		dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, pitch, pitch_align, array_mode);
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(height, height_align)) {
-		dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
-			 __func__, __LINE__, height);
+		dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, height, height_align, array_mode);
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(base_offset, base_align)) {
-		dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+		dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
+			 base_offset, base_align, array_mode);
 		return -EINVAL;
 	}
 
@@ -320,7 +321,10 @@
 			 * broken userspace.
 			 */
 		} else {
-			dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
+			dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i,
+				 array_mode,
+				 track->cb_color_bo_offset[i], tmp,
+				 radeon_bo_size(track->cb_color_bo[i]));
 			return -EINVAL;
 		}
 	}
@@ -455,17 +459,18 @@
 			}
 
 			if (!IS_ALIGNED(pitch, pitch_align)) {
-				dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
-					 __func__, __LINE__, pitch);
+				dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+					 __func__, __LINE__, pitch, pitch_align, array_mode);
 				return -EINVAL;
 			}
 			if (!IS_ALIGNED(height, height_align)) {
-				dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
-					 __func__, __LINE__, height);
+				dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+					 __func__, __LINE__, height, height_align, array_mode);
 				return -EINVAL;
 			}
 			if (!IS_ALIGNED(base_offset, base_align)) {
-				dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset);
+				dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
+					 base_offset, base_align, array_mode);
 				return -EINVAL;
 			}
 
@@ -473,9 +478,10 @@
 			nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
 			tmp = ntiles * bpe * 64 * nviews;
 			if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
-				dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n",
-						track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
-						radeon_bo_size(track->db_bo));
+				dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+					 array_mode,
+					 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+					 radeon_bo_size(track->db_bo));
 				return -EINVAL;
 			}
 		}
@@ -1227,18 +1233,18 @@
 	/* XXX check height as well... */
 
 	if (!IS_ALIGNED(pitch, pitch_align)) {
-		dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
-			 __func__, __LINE__, pitch);
+		dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
+			 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(base_offset, base_align)) {
-		dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n",
-			 __func__, __LINE__, base_offset);
+		dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
+			 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
 		return -EINVAL;
 	}
 	if (!IS_ALIGNED(mip_offset, base_align)) {
-		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n",
-			 __func__, __LINE__, mip_offset);
+		dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
+			 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
 		return -EINVAL;
 	}
 
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 33cda01..f869897 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -81,7 +81,11 @@
 #define R600_MEDIUM_VID_LOWER_GPIO_CNTL                            0x720
 #define R600_LOW_VID_LOWER_GPIO_CNTL                               0x724
 
-
+#define R600_D1GRPH_SWAP_CONTROL                               0x610C
+#       define R600_D1GRPH_SWAP_ENDIAN_NONE                    (0 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_16BIT                   (1 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_32BIT                   (2 << 0)
+#       define R600_D1GRPH_SWAP_ENDIAN_64BIT                   (3 << 0)
 
 #define R600_HDP_NONSURFACE_BASE                                0x2c04
 
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index a5d898b..04bac0b 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -154,13 +154,14 @@
 #define		ROQ_IB2_START(x)				((x) << 8)
 #define	CP_RB_BASE					0xC100
 #define	CP_RB_CNTL					0xC104
-#define		RB_BUFSZ(x)					((x)<<0)
-#define		RB_BLKSZ(x)					((x)<<8)
-#define		RB_NO_UPDATE					(1<<27)
-#define		RB_RPTR_WR_ENA					(1<<31)
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
 #define		BUF_SWAP_32BIT					(2 << 16)
 #define	CP_RB_RPTR					0x8700
 #define	CP_RB_RPTR_ADDR					0xC10C
+#define		RB_RPTR_SWAP(x)					((x) << 0)
 #define	CP_RB_RPTR_ADDR_HI				0xC110
 #define	CP_RB_RPTR_WR					0xC108
 #define	CP_RB_WPTR					0xC114
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 71d2a55..56c48b6 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -179,10 +179,10 @@
 void radeon_atombios_get_power_modes(struct radeon_device *rdev);
 void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
 void rs690_pm_info(struct radeon_device *rdev);
-extern u32 rv6xx_get_temp(struct radeon_device *rdev);
-extern u32 rv770_get_temp(struct radeon_device *rdev);
-extern u32 evergreen_get_temp(struct radeon_device *rdev);
-extern u32 sumo_get_temp(struct radeon_device *rdev);
+extern int rv6xx_get_temp(struct radeon_device *rdev);
+extern int rv770_get_temp(struct radeon_device *rdev);
+extern int evergreen_get_temp(struct radeon_device *rdev);
+extern int sumo_get_temp(struct radeon_device *rdev);
 
 /*
  * Fences.
@@ -812,8 +812,7 @@
 	fixed20_12		sclk;
 	fixed20_12		mclk;
 	fixed20_12		needed_bandwidth;
-	/* XXX: use a define for num power modes */
-	struct radeon_power_state power_state[8];
+	struct radeon_power_state *power_state;
 	/* number of valid power states */
 	int                     num_power_states;
 	int                     current_power_state_index;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 3a1b161..e75d63b 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -759,7 +759,7 @@
 	.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
 	.gart_set_page = &rs600_gart_set_page,
 	.ring_test = &r600_ring_test,
-	.ring_ib_execute = &r600_ring_ib_execute,
+	.ring_ib_execute = &evergreen_ring_ib_execute,
 	.irq_set = &evergreen_irq_set,
 	.irq_process = &evergreen_irq_process,
 	.get_vblank_counter = &evergreen_get_vblank_counter,
@@ -805,7 +805,7 @@
 	.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
 	.gart_set_page = &rs600_gart_set_page,
 	.ring_test = &r600_ring_test,
-	.ring_ib_execute = &r600_ring_ib_execute,
+	.ring_ib_execute = &evergreen_ring_ib_execute,
 	.irq_set = &evergreen_irq_set,
 	.irq_process = &evergreen_irq_process,
 	.get_vblank_counter = &evergreen_get_vblank_counter,
@@ -848,7 +848,7 @@
 	.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
 	.gart_set_page = &rs600_gart_set_page,
 	.ring_test = &r600_ring_test,
-	.ring_ib_execute = &r600_ring_ib_execute,
+	.ring_ib_execute = &evergreen_ring_ib_execute,
 	.irq_set = &evergreen_irq_set,
 	.irq_process = &evergreen_irq_process,
 	.get_vblank_counter = &evergreen_get_vblank_counter,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index e01f077..c59bd98 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -355,6 +355,7 @@
 bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
 int evergreen_asic_reset(struct radeon_device *rdev);
 void evergreen_bandwidth_update(struct radeon_device *rdev);
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_copy_blit(struct radeon_device *rdev,
 			uint64_t src_offset, uint64_t dst_offset,
 			unsigned num_pages, struct radeon_fence *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 1573202..02d5c41 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -88,7 +88,7 @@
 			/* some evergreen boards have bad data for this entry */
 			if (ASIC_IS_DCE4(rdev)) {
 				if ((i == 7) &&
-				    (gpio->usClkMaskRegisterIndex == 0x1936) &&
+				    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
 				    (gpio->sucI2cId.ucAccess == 0)) {
 					gpio->sucI2cId.ucAccess = 0x97;
 					gpio->ucDataMaskShift = 8;
@@ -101,7 +101,7 @@
 			/* some DCE3 boards have bad data for this entry */
 			if (ASIC_IS_DCE3(rdev)) {
 				if ((i == 4) &&
-				    (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+				    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
 				    (gpio->sucI2cId.ucAccess == 0x94))
 					gpio->sucI2cId.ucAccess = 0x14;
 			}
@@ -172,7 +172,7 @@
 			/* some evergreen boards have bad data for this entry */
 			if (ASIC_IS_DCE4(rdev)) {
 				if ((i == 7) &&
-				    (gpio->usClkMaskRegisterIndex == 0x1936) &&
+				    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
 				    (gpio->sucI2cId.ucAccess == 0)) {
 					gpio->sucI2cId.ucAccess = 0x97;
 					gpio->ucDataMaskShift = 8;
@@ -185,7 +185,7 @@
 			/* some DCE3 boards have bad data for this entry */
 			if (ASIC_IS_DCE3(rdev)) {
 				if ((i == 4) &&
-				    (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+				    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
 				    (gpio->sucI2cId.ucAccess == 0x94))
 					gpio->sucI2cId.ucAccess = 0x14;
 			}
@@ -252,7 +252,7 @@
 			pin = &gpio_info->asGPIO_Pin[i];
 			if (id == pin->ucGPIO_ID) {
 				gpio.id = pin->ucGPIO_ID;
-				gpio.reg = pin->usGpioPin_AIndex * 4;
+				gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
 				gpio.mask = (1 << pin->ucGpioPinBitShift);
 				gpio.valid = true;
 				break;
@@ -387,15 +387,11 @@
 			*line_mux = 0x90;
 	}
 
-	/* mac rv630 */
-	if ((dev->pdev->device == 0x9588) &&
-	    (dev->pdev->subsystem_vendor == 0x106b) &&
-	    (dev->pdev->subsystem_device == 0x00a6)) {
-		if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
-		    (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
-			*connector_type = DRM_MODE_CONNECTOR_9PinDIN;
-			*line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
-		}
+	/* mac rv630, rv730, others */
+	if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
+	    (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
+		*connector_type = DRM_MODE_CONNECTOR_9PinDIN;
+		*line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
 	}
 
 	/* ASUS HD 3600 XT board lists the DVI port as HDMI */
@@ -1167,16 +1163,6 @@
 				p1pll->pll_out_min = 64800;
 			else
 				p1pll->pll_out_min = 20000;
-		} else if (p1pll->pll_out_min > 64800) {
-			/* Limiting the pll output range is a good thing generally as
-			 * it limits the number of possible pll combinations for a given
-			 * frequency presumably to the ones that work best on each card.
-			 * However, certain duallink DVI monitors seem to like
-			 * pll combinations that would be limited by this at least on
-			 * pre-DCE 3.0 r6xx hardware.  This might need to be adjusted per
-			 * family.
-			 */
-			p1pll->pll_out_min = 64800;
 		}
 
 		p1pll->pll_in_min =
@@ -1288,11 +1274,11 @@
 				      data_offset);
 		switch (crev) {
 		case 1:
-			if (igp_info->info.ulBootUpMemoryClock)
+			if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
 				return true;
 			break;
 		case 2:
-			if (igp_info->info_2.ulBootUpSidePortClock)
+			if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
 				return true;
 			break;
 		default:
@@ -1456,7 +1442,7 @@
 
 			for (i = 0; i < num_indices; i++) {
 				if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
-				    (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) {
+				    (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
 					ss->percentage =
 						le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
 					ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1470,7 +1456,7 @@
 				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
 			for (i = 0; i < num_indices; i++) {
 				if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
-				    (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) {
+				    (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
 					ss->percentage =
 						le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
 					ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1484,7 +1470,7 @@
 				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
 			for (i = 0; i < num_indices; i++) {
 				if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
-				    (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) {
+				    (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
 					ss->percentage =
 						le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
 					ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1567,8 +1553,8 @@
 		if (misc & ATOM_DOUBLE_CLOCK_MODE)
 			lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
 
-		lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize;
-		lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize;
+		lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
+		lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
 
 		/* set crtc values */
 		drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
@@ -1583,13 +1569,13 @@
 			lvds->linkb = false;
 
 		/* parse the lcd record table */
-		if (lvds_info->info.usModePatchTableOffset) {
+		if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
 			ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
 			ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
 			bool bad_record = false;
 			u8 *record = (u8 *)(mode_info->atom_context->bios +
 					    data_offset +
-					    lvds_info->info.usModePatchTableOffset);
+					    le16_to_cpu(lvds_info->info.usModePatchTableOffset));
 			while (*record != ATOM_RECORD_END_TYPE) {
 				switch (*record) {
 				case LCD_MODE_PATCH_RECORD_MODE_TYPE:
@@ -1991,6 +1977,9 @@
 	num_modes = power_info->info.ucNumOfPowerModeEntries;
 	if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
 		num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
+	if (!rdev->pm.power_state)
+		return state_index;
 	/* last mode is usually default, array is low to high */
 	for (i = 0; i < num_modes; i++) {
 		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
@@ -2200,7 +2189,7 @@
 		firmware_info =
 			(union firmware_info *)(mode_info->atom_context->bios +
 						data_offset);
-		vddc = firmware_info->info_14.usBootUpVDDCVoltage;
+		vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
 	}
 
 	return vddc;
@@ -2295,7 +2284,7 @@
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
 			VOLTAGE_SW;
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
-			clock_info->evergreen.usVDDC;
+			le16_to_cpu(clock_info->evergreen.usVDDC);
 	} else {
 		sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
 		sclk |= clock_info->r600.ucEngineClockHigh << 16;
@@ -2306,7 +2295,7 @@
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
 			VOLTAGE_SW;
 		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
-			clock_info->r600.usVDDC;
+			le16_to_cpu(clock_info->r600.usVDDC);
 	}
 
 	if (rdev->flags & RADEON_IS_IGP) {
@@ -2342,6 +2331,10 @@
 	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
 	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+				       power_info->pplib.ucNumStates, GFP_KERNEL);
+	if (!rdev->pm.power_state)
+		return state_index;
 	/* first mode is usually default, followed by low to high */
 	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
 		mode_index = 0;
@@ -2415,13 +2408,17 @@
 	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
 	state_array = (struct StateArray *)
 		(mode_info->atom_context->bios + data_offset +
-		 power_info->pplib.usStateArrayOffset);
+		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
 	clock_info_array = (struct ClockInfoArray *)
 		(mode_info->atom_context->bios + data_offset +
-		 power_info->pplib.usClockInfoArrayOffset);
+		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
 	non_clock_info_array = (struct NonClockInfoArray *)
 		(mode_info->atom_context->bios + data_offset +
-		 power_info->pplib.usNonClockInfoArrayOffset);
+		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
+				       state_array->ucNumEntries, GFP_KERNEL);
+	if (!rdev->pm.power_state)
+		return state_index;
 	for (i = 0; i < state_array->ucNumEntries; i++) {
 		mode_index = 0;
 		power_state = (union pplib_power_state *)&state_array->states[i];
@@ -2495,19 +2492,22 @@
 			break;
 		}
 	} else {
-		/* add the default mode */
-		rdev->pm.power_state[state_index].type =
-			POWER_STATE_TYPE_DEFAULT;
-		rdev->pm.power_state[state_index].num_clock_modes = 1;
-		rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
-		rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
-		rdev->pm.power_state[state_index].default_clock_mode =
-			&rdev->pm.power_state[state_index].clock_info[0];
-		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
-		rdev->pm.power_state[state_index].pcie_lanes = 16;
-		rdev->pm.default_power_state_index = state_index;
-		rdev->pm.power_state[state_index].flags = 0;
-		state_index++;
+		rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
+		if (rdev->pm.power_state) {
+			/* add the default mode */
+			rdev->pm.power_state[state_index].type =
+				POWER_STATE_TYPE_DEFAULT;
+			rdev->pm.power_state[state_index].num_clock_modes = 1;
+			rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+			rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+			rdev->pm.power_state[state_index].default_clock_mode =
+				&rdev->pm.power_state[state_index].clock_info[0];
+			rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+			rdev->pm.power_state[state_index].pcie_lanes = 16;
+			rdev->pm.default_power_state_index = state_index;
+			rdev->pm.power_state[state_index].flags = 0;
+			state_index++;
+		}
 	}
 
 	rdev->pm.num_power_states = state_index;
@@ -2533,7 +2533,7 @@
 	int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-	return args.ulReturnEngineClock;
+	return le32_to_cpu(args.ulReturnEngineClock);
 }
 
 uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
@@ -2542,7 +2542,7 @@
 	int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-	return args.ulReturnMemoryClock;
+	return le32_to_cpu(args.ulReturnMemoryClock);
 }
 
 void radeon_atom_set_engine_clock(struct radeon_device *rdev,
@@ -2551,7 +2551,7 @@
 	SET_ENGINE_CLOCK_PS_ALLOCATION args;
 	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
 
-	args.ulTargetEngineClock = eng_clock;	/* 10 khz */
+	args.ulTargetEngineClock = cpu_to_le32(eng_clock);	/* 10 khz */
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
@@ -2565,7 +2565,7 @@
 	if (rdev->flags & RADEON_IS_IGP)
 		return;
 
-	args.ulTargetMemoryClock = mem_clock;	/* 10 khz */
+	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
@@ -2623,7 +2623,7 @@
 	bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
 
 	/* tell the bios not to handle mode switching */
-	bios_6_scratch |= (ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH | ATOM_S6_ACC_MODE);
+	bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
 
 	if (rdev->family >= CHIP_R600) {
 		WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
@@ -2674,10 +2674,13 @@
 	else
 		bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH);
 
-	if (lock)
+	if (lock) {
 		bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
-	else
+		bios_6_scratch &= ~ATOM_S6_ACC_MODE;
+	} else {
 		bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
+		bios_6_scratch |= ATOM_S6_ACC_MODE;
+	}
 
 	if (rdev->family >= CHIP_R600)
 		WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
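Most of the conversions above wrap ATOM BIOS table fields in le16_to_cpu()/le32_to_cpu(): the tables are stored little-endian in the video BIOS, so using a raw field such as usClkMaskRegisterIndex only works by accident on little-endian CPUs and breaks on big-endian ones. A small standalone sketch of the underlying byte swap (the helper name is made up; the kernel uses its byteorder helpers):

#include <stdint.h>
#include <stdio.h>

/* On a big-endian kernel le16_to_cpu() byte-swaps a 16-bit little-endian
 * field; on little-endian kernels it is a no-op. */
static uint16_t sketch_le16_to_cpu_be(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
	uint16_t loaded = 0x3619;	/* LE value 0x1936 as seen by a BE CPU */

	/* the comparison against 0x1936 only succeeds after conversion */
	printf("0x%04x\n", sketch_le16_to_cpu_be(loaded));	/* 0x1936 */
	return 0;
}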
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 591fcae..cf7c8d5 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1504,6 +1504,11 @@
 			   (rdev->pdev->subsystem_device == 0x4a48)) {
 			/* Mac X800 */
 			rdev->mode_info.connector_table = CT_MAC_X800;
+		} else if ((rdev->pdev->device == 0x4150) &&
+			   (rdev->pdev->subsystem_vendor == 0x1002) &&
+			   (rdev->pdev->subsystem_device == 0x4150)) {
+			/* Mac G5 9600 */
+			rdev->mode_info.connector_table = CT_MAC_G5_9600;
 		} else
 #endif /* CONFIG_PPC_PMAC */
 #ifdef CONFIG_PPC64
@@ -2022,6 +2027,48 @@
 					    CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
 					    &hpd);
 		break;
+	case CT_MAC_G5_9600:
+		DRM_INFO("Connector Table: %d (mac g5 9600)\n",
+			 rdev->mode_info.connector_table);
+		/* DVI - tv dac, dvo */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+		hpd.hpd = RADEON_HPD_1; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP2_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP2_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT2_SUPPORT,
+								  2),
+					  ATOM_DEVICE_CRT2_SUPPORT);
+		radeon_add_legacy_connector(dev, 0,
+					    ATOM_DEVICE_DFP2_SUPPORT |
+					    ATOM_DEVICE_CRT2_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		/* ADC - primary dac, internal tmds */
+		ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+		hpd.hpd = RADEON_HPD_2; /* ??? */
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_DFP1_SUPPORT,
+								  0),
+					  ATOM_DEVICE_DFP1_SUPPORT);
+		radeon_add_legacy_encoder(dev,
+					  radeon_get_encoder_enum(dev,
+								  ATOM_DEVICE_CRT1_SUPPORT,
+								  1),
+					  ATOM_DEVICE_CRT1_SUPPORT);
+		radeon_add_legacy_connector(dev, 1,
+					    ATOM_DEVICE_DFP1_SUPPORT |
+					    ATOM_DEVICE_CRT1_SUPPORT,
+					    DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
+					    CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
+					    &hpd);
+		break;
 	default:
 		DRM_INFO("Connector table: %d (invalid)\n",
 			 rdev->mode_info.connector_table);
@@ -2442,6 +2489,17 @@
 
 	rdev->pm.default_power_state_index = -1;
 
+	/* allocate 2 power states */
+	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
+	if (!rdev->pm.power_state) {
+		rdev->pm.default_power_state_index = state_index;
+		rdev->pm.num_power_states = 0;
+
+		rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+		rdev->pm.current_clock_mode_index = 0;
+		return;
+	}
+
 	if (rdev->flags & RADEON_IS_MOBILITY) {
 		offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
 		if (offset) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 26091d6..4954e2d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -891,9 +891,9 @@
 		pci_disable_device(dev->pdev);
 		pci_set_power_state(dev->pdev, PCI_D3hot);
 	}
-	acquire_console_sem();
+	console_lock();
 	radeon_fbdev_set_suspend(rdev, 1);
-	release_console_sem();
+	console_unlock();
 	return 0;
 }
 
@@ -905,11 +905,11 @@
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
-	acquire_console_sem();
+	console_lock();
 	pci_set_power_state(dev->pdev, PCI_D0);
 	pci_restore_state(dev->pdev);
 	if (pci_enable_device(dev->pdev)) {
-		release_console_sem();
+		console_unlock();
 		return -1;
 	}
 	pci_set_master(dev->pdev);
@@ -920,7 +920,7 @@
 	radeon_restore_bios_scratch_regs(rdev);
 
 	radeon_fbdev_set_suspend(rdev, 0);
-	release_console_sem();
+	console_unlock();
 
 	/* reset hpd state */
 	radeon_hpd_init(rdev);
@@ -936,8 +936,11 @@
 int radeon_gpu_reset(struct radeon_device *rdev)
 {
 	int r;
+	int resched;
 
 	radeon_save_bios_scratch_regs(rdev);
+	/* block TTM */
+	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
 	radeon_suspend(rdev);
 
 	r = radeon_asic_reset(rdev);
@@ -946,6 +949,7 @@
 		radeon_resume(rdev);
 		radeon_restore_bios_scratch_regs(rdev);
 		drm_helper_resume_force_mode(rdev->ddev);
+		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 		return 0;
 	}
 	/* bad news, how to tell it to userspace ? */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d26dabf..0e65709 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -780,6 +780,125 @@
 	return ret;
 }
 
+/* avivo */
+static void avivo_get_fb_div(struct radeon_pll *pll,
+			     u32 target_clock,
+			     u32 post_div,
+			     u32 ref_div,
+			     u32 *fb_div,
+			     u32 *frac_fb_div)
+{
+	u32 tmp = post_div * ref_div;
+
+	tmp *= target_clock;
+	*fb_div = tmp / pll->reference_freq;
+	*frac_fb_div = tmp % pll->reference_freq;
+
+	if (*fb_div > pll->max_feedback_div)
+		*fb_div = pll->max_feedback_div;
+	else if (*fb_div < pll->min_feedback_div)
+		*fb_div = pll->min_feedback_div;
+}
+
+static u32 avivo_get_post_div(struct radeon_pll *pll,
+			      u32 target_clock)
+{
+	u32 vco, post_div, tmp;
+
+	if (pll->flags & RADEON_PLL_USE_POST_DIV)
+		return pll->post_div;
+
+	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
+		if (pll->flags & RADEON_PLL_IS_LCD)
+			vco = pll->lcd_pll_out_min;
+		else
+			vco = pll->pll_out_min;
+	} else {
+		if (pll->flags & RADEON_PLL_IS_LCD)
+			vco = pll->lcd_pll_out_max;
+		else
+			vco = pll->pll_out_max;
+	}
+
+	post_div = vco / target_clock;
+	tmp = vco % target_clock;
+
+	if (pll->flags & RADEON_PLL_PREFER_MINM_OVER_MAXP) {
+		if (tmp)
+			post_div++;
+	} else {
+		if (!tmp)
+			post_div--;
+	}
+
+	if (post_div > pll->max_post_div)
+		post_div = pll->max_post_div;
+	else if (post_div < pll->min_post_div)
+		post_div = pll->min_post_div;
+
+	return post_div;
+}
+
+#define MAX_TOLERANCE 10
+
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+			      u32 freq,
+			      u32 *dot_clock_p,
+			      u32 *fb_div_p,
+			      u32 *frac_fb_div_p,
+			      u32 *ref_div_p,
+			      u32 *post_div_p)
+{
+	u32 target_clock = freq / 10;
+	u32 post_div = avivo_get_post_div(pll, target_clock);
+	u32 ref_div = pll->min_ref_div;
+	u32 fb_div = 0, frac_fb_div = 0, tmp;
+
+	if (pll->flags & RADEON_PLL_USE_REF_DIV)
+		ref_div = pll->reference_div;
+
+	if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+		avivo_get_fb_div(pll, target_clock, post_div, ref_div, &fb_div, &frac_fb_div);
+		frac_fb_div = (100 * frac_fb_div) / pll->reference_freq;
+		if (frac_fb_div >= 5) {
+			frac_fb_div -= 5;
+			frac_fb_div = frac_fb_div / 10;
+			frac_fb_div++;
+		}
+		if (frac_fb_div >= 10) {
+			fb_div++;
+			frac_fb_div = 0;
+		}
+	} else {
+		while (ref_div <= pll->max_ref_div) {
+			avivo_get_fb_div(pll, target_clock, post_div, ref_div,
+					 &fb_div, &frac_fb_div);
+			if (frac_fb_div >= (pll->reference_freq / 2))
+				fb_div++;
+			frac_fb_div = 0;
+			tmp = (pll->reference_freq * fb_div) / (post_div * ref_div);
+			tmp = (tmp * 10000) / target_clock;
+
+			if (tmp > (10000 + MAX_TOLERANCE))
+				ref_div++;
+			else if (tmp >= (10000 - MAX_TOLERANCE))
+				break;
+			else
+				ref_div++;
+		}
+	}
+
+	*dot_clock_p = ((pll->reference_freq * fb_div * 10) + (pll->reference_freq * frac_fb_div)) /
+		(ref_div * post_div * 10);
+	*fb_div_p = fb_div;
+	*frac_fb_div_p = frac_fb_div;
+	*ref_div_p = ref_div;
+	*post_div_p = post_div;
+	DRM_DEBUG_KMS("%d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+		      *dot_clock_p, fb_div, frac_fb_div, ref_div, post_div);
+}
+
+/* pre-avivo */
 static inline uint32_t radeon_div(uint64_t n, uint32_t d)
 {
 	uint64_t mod;
@@ -790,13 +909,13 @@
 	return n;
 }
 
-void radeon_compute_pll(struct radeon_pll *pll,
-			uint64_t freq,
-			uint32_t *dot_clock_p,
-			uint32_t *fb_div_p,
-			uint32_t *frac_fb_div_p,
-			uint32_t *ref_div_p,
-			uint32_t *post_div_p)
+void radeon_compute_pll_legacy(struct radeon_pll *pll,
+			       uint64_t freq,
+			       uint32_t *dot_clock_p,
+			       uint32_t *fb_div_p,
+			       uint32_t *frac_fb_div_p,
+			       uint32_t *ref_div_p,
+			       uint32_t *post_div_p)
 {
 	uint32_t min_ref_div = pll->min_ref_div;
 	uint32_t max_ref_div = pll->max_ref_div;
@@ -826,6 +945,9 @@
 		pll_out_max = pll->pll_out_max;
 	}
 
+	if (pll_out_min > 64800)
+		pll_out_min = 64800;
+
 	if (pll->flags & RADEON_PLL_USE_REF_DIV)
 		min_ref_div = max_ref_div = pll->reference_div;
 	else {
@@ -849,7 +971,7 @@
 		max_fractional_feed_div = pll->max_frac_feedback_div;
 	}
 
-	for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
+	for (post_div = min_post_div; post_div <= max_post_div; ++post_div) {
 		uint32_t ref_div;
 
 		if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
@@ -965,6 +1087,10 @@
 	*frac_fb_div_p = best_frac_feedback_div;
 	*ref_div_p = best_ref_div;
 	*post_div_p = best_post_div;
+	DRM_DEBUG_KMS("%d %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
+		      freq, best_freq / 1000, best_feedback_div, best_frac_feedback_div,
+		      best_ref_div, best_post_div);
+
 }
 
 static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
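radeon_compute_pll_avivo() first picks a post divider from the VCO range, then derives the (fractional) feedback divider, and finally reports the dot clock as reference_freq * (fb_div + frac_fb_div/10) / (ref_div * post_div), with frac_fb_div in tenths and clocks in 10 kHz units. A standalone sketch of that last computation, with illustrative numbers only:

#include <stdint.h>
#include <stdio.h>

static uint32_t dot_clock(uint32_t ref_freq, uint32_t fb_div,
			  uint32_t frac_fb_div, uint32_t ref_div,
			  uint32_t post_div)
{
	/* same integer arithmetic as the driver: frac_fb_div is in tenths */
	return ((ref_freq * fb_div * 10) + (ref_freq * frac_fb_div)) /
	       (ref_div * post_div * 10);
}

int main(void)
{
	/* 27 MHz reference (2700 * 10 kHz), fb 40.5, ref_div 2, post_div 4 */
	printf("%u\n", dot_clock(2700, 40, 5, 2, 4));	/* 13668, i.e. ~136.7 MHz */
	return 0;
}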
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index d5680a0..275b26a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -48,7 +48,7 @@
  * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
  * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
  *   2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
- *   2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK
+ *   2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK, clock crystal query
  */
 #define KMS_DRIVER_MAJOR	2
 #define KMS_DRIVER_MINOR	8
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 448eba8..5cba46b 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1524,6 +1524,7 @@
 #define R600_CP_RB_CNTL                                        0xc104
 #       define R600_RB_BUFSZ(x)                                ((x) << 0)
 #       define R600_RB_BLKSZ(x)                                ((x) << 8)
+#       define R600_BUF_SWAP_32BIT                             (2 << 16)
 #       define R600_RB_NO_UPDATE                               (1 << 27)
 #       define R600_RB_RPTR_WR_ENA                             (1 << 31)
 #define R600_CP_RB_RPTR_WR                                     0xc108
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 8fd1842..b427488 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -641,7 +641,7 @@
 	switch (connector->connector_type) {
 	case DRM_MODE_CONNECTOR_DVII:
 	case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-		if (drm_detect_monitor_audio(radeon_connector->edid)) {
+		if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
 			/* fix me */
 			if (ASIC_IS_DCE4(rdev))
 				return ATOM_ENCODER_MODE_DVI;
@@ -655,7 +655,7 @@
 	case DRM_MODE_CONNECTOR_DVID:
 	case DRM_MODE_CONNECTOR_HDMIA:
 	default:
-		if (drm_detect_monitor_audio(radeon_connector->edid)) {
+		if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
 			/* fix me */
 			if (ASIC_IS_DCE4(rdev))
 				return ATOM_ENCODER_MODE_DVI;
@@ -673,7 +673,7 @@
 		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
 		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
 			return ATOM_ENCODER_MODE_DP;
-		else if (drm_detect_monitor_audio(radeon_connector->edid)) {
+		else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
 			/* fix me */
 			if (ASIC_IS_DCE4(rdev))
 				return ATOM_ENCODER_MODE_DVI;
@@ -910,7 +910,7 @@
 
 	args.v1.ucAction = action;
 	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
-		args.v1.usInitInfo = connector_object_id;
+		args.v1.usInitInfo = cpu_to_le16(connector_object_id);
 	} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
 		args.v1.asMode.ucLaneSel = lane_num;
 		args.v1.asMode.ucLaneSet = lane_set;
@@ -1063,7 +1063,7 @@
 	if (!ASIC_IS_DCE4(rdev))
 		return;
 
-	if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) ||
+	if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
 	    (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
 		return;
 
@@ -1140,7 +1140,7 @@
 		case 3:
 			args.v3.sExtEncoder.ucAction = action;
 			if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
-				args.v3.sExtEncoder.usConnectorId = connector_object_id;
+				args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
 			else
 				args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 			args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
@@ -1570,11 +1570,21 @@
 	}
 
 	/* set scaler clears this on some chips */
-	/* XXX check DCE4 */
-	if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
-		if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
-			WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
-			       AVIVO_D1MODE_INTERLEAVE_EN);
+	if (ASIC_IS_AVIVO(rdev) &&
+	    (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
+		if (ASIC_IS_DCE4(rdev)) {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       EVERGREEN_INTERLEAVE_EN);
+			else
+				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		} else {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       AVIVO_D1MODE_INTERLEAVE_EN);
+			else
+				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		}
 	}
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a289646..9ec830c 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -110,11 +110,14 @@
 
 int radeon_irq_kms_init(struct radeon_device *rdev)
 {
+	int i;
 	int r = 0;
 
 	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
 
 	spin_lock_init(&rdev->irq.sw_lock);
+	for (i = 0; i < rdev->num_crtc; i++)
+		spin_lock_init(&rdev->irq.pflip_lock[i]);
 	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
 	if (r) {
 		return r;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 28a53e4..8387d32 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -201,6 +201,10 @@
 		}
 		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
 		break;
+	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
+		/* return clock value in KHz */
+		value = rdev->clock.spll.reference_freq * 10;
+		break;
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
 		return -EINVAL;
@@ -243,6 +247,8 @@
 	struct radeon_device *rdev = dev->dev_private;
 	if (rdev->hyperz_filp == file_priv)
 		rdev->hyperz_filp = NULL;
+	if (rdev->cmask_filp == file_priv)
+		rdev->cmask_filp = NULL;
 }
 
 /*
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index ace2e63..cf0638c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -778,9 +778,9 @@
 	DRM_DEBUG_KMS("\n");
 
 	if (!use_bios_divs) {
-		radeon_compute_pll(pll, mode->clock,
-				   &freq, &feedback_div, &frac_fb_div,
-				   &reference_div, &post_divider);
+		radeon_compute_pll_legacy(pll, mode->clock,
+					  &freq, &feedback_div, &frac_fb_div,
+					  &reference_div, &post_divider);
 
 		for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
 			if (post_div->divider == post_divider)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 12bdeab..a670caa 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -149,6 +149,7 @@
 #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
 #define RADEON_PLL_USE_POST_DIV         (1 << 12)
 #define RADEON_PLL_IS_LCD               (1 << 13)
+#define RADEON_PLL_PREFER_MINM_OVER_MAXP (1 << 14)
 
 struct radeon_pll {
 	/* reference frequency */
@@ -208,6 +209,7 @@
 	CT_EMAC,
 	CT_RN50_POWER,
 	CT_MAC_X800,
+	CT_MAC_G5_9600,
 };
 
 enum radeon_dvo_chip {
@@ -510,13 +512,21 @@
 					     struct radeon_atom_ss *ss,
 					     int id, u32 clock);
 
-extern void radeon_compute_pll(struct radeon_pll *pll,
-			       uint64_t freq,
-			       uint32_t *dot_clock_p,
-			       uint32_t *fb_div_p,
-			       uint32_t *frac_fb_div_p,
-			       uint32_t *ref_div_p,
-			       uint32_t *post_div_p);
+extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
+				      uint64_t freq,
+				      uint32_t *dot_clock_p,
+				      uint32_t *fb_div_p,
+				      uint32_t *frac_fb_div_p,
+				      uint32_t *ref_div_p,
+				      uint32_t *post_div_p);
+
+extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
+				     u32 freq,
+				     u32 *dot_clock_p,
+				     u32 *fb_div_p,
+				     u32 *frac_fb_div_p,
+				     u32 *ref_div_p,
+				     u32 *post_div_p);
 
 extern void radeon_setup_encoder_clones(struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 3b1b2bf..2aed03b 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -430,7 +430,7 @@
 {
 	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
 	struct radeon_device *rdev = ddev->dev_private;
-	u32 temp;
+	int temp;
 
 	switch (rdev->pm.int_thermal_type) {
 	case THERMAL_TYPE_RV6XX:
@@ -646,6 +646,9 @@
 #endif
 	}
 
+	if (rdev->pm.power_state)
+		kfree(rdev->pm.power_state);
+
 	radeon_hwmon_fini(rdev);
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 3cd4dac..ec93a75 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -375,6 +375,8 @@
 #define RADEON_CONFIG_APER_SIZE             0x0108
 #define RADEON_CONFIG_BONDS                 0x00e8
 #define RADEON_CONFIG_CNTL                  0x00e0
+#       define RADEON_CFG_VGA_RAM_EN        (1 << 8)
+#       define RADEON_CFG_VGA_IO_DIS        (1 << 9)
 #       define RADEON_CFG_ATI_REV_A11       (0   << 16)
 #       define RADEON_CFG_ATI_REV_A12       (1   << 16)
 #       define RADEON_CFG_ATI_REV_A13       (2   << 16)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1272e4b..e5b2cf1 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -787,9 +787,9 @@
 		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
 		radeon_mem_types_list[i].driver_features = 0;
 		if (i == 0)
-			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv;
+			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
 		else
-			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv;
+			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
 
 	}
 	/* Add ttm page pool to debugfs */
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
index b506ec1..e8a1786 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r300
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -683,9 +683,7 @@
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -706,13 +704,11 @@
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4F04 ZB_ZSTENCILCNTL
 0x4F08 ZB_STENCILREFMASK
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
index 8c1214c..722074e 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r420
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -130,7 +130,6 @@
 0x401C GB_SELECT
 0x4020 GB_AA_CONFIG
 0x4024 GB_FIFO_SIZE
-0x4028 GB_Z_PEQ_CONFIG
 0x4100 TX_INVALTAGS
 0x4200 GA_POINT_S0
 0x4204 GA_POINT_T0
@@ -750,9 +749,7 @@
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -773,13 +770,11 @@
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4F04 ZB_ZSTENCILCNTL
 0x4F08 ZB_STENCILREFMASK
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 0828d80..d9f6286 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -749,9 +749,7 @@
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -772,13 +770,11 @@
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4F04 ZB_ZSTENCILCNTL
 0x4F08 ZB_STENCILREFMASK
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index ef422bb..911a8fb 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -164,7 +164,6 @@
 0x401C GB_SELECT
 0x4020 GB_AA_CONFIG
 0x4024 GB_FIFO_SIZE
-0x4028 GB_Z_PEQ_CONFIG
 0x4100 TX_INVALTAGS
 0x4114 SU_TEX_WRAP_PS3
 0x4118 PS3_ENABLE
@@ -461,9 +460,7 @@
 0x4DF4 US_ALU_CONST_G_31
 0x4DF8 US_ALU_CONST_B_31
 0x4DFC US_ALU_CONST_A_31
-0x4E04 RB3D_BLENDCNTL_R3
 0x4E08 RB3D_ABLENDCNTL_R3
-0x4E0C RB3D_COLOR_CHANNEL_MASK
 0x4E10 RB3D_CONSTANT_COLOR
 0x4E14 RB3D_COLOR_CLEAR_VALUE
 0x4E18 RB3D_ROPCNTL_R3
@@ -484,9 +481,6 @@
 0x4E74 RB3D_CMASK_WRINDEX
 0x4E78 RB3D_CMASK_DWORD
 0x4E7C RB3D_CMASK_RDINDEX
-0x4E80 RB3D_AARESOLVE_OFFSET
-0x4E84 RB3D_AARESOLVE_PITCH
-0x4E88 RB3D_AARESOLVE_CTL
 0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
 0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
 0x4EF8 RB3D_CONSTANT_COLOR_AR
@@ -496,4 +490,5 @@
 0x4F14 ZB_ZTOP
 0x4F18 ZB_ZCACHE_CTLSTAT
 0x4F58 ZB_ZPASS_DATA
+0x4F28 ZB_DEPTHCLEARVALUE
 0x4FD4 ZB_STENCILREFMASK_BF
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 5512e4e..c76283d 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -203,6 +203,9 @@
 	radeon_gart_table_ram_free(rdev);
 }
 
+#define RS400_PTE_WRITEABLE (1 << 2)
+#define RS400_PTE_READABLE  (1 << 3)
+
 int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 {
 	uint32_t entry;
@@ -213,7 +216,7 @@
 
 	entry = (lower_32_bits(addr) & PAGE_MASK) |
 		((upper_32_bits(addr) & 0xff) << 4) |
-		0xc;
+		RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
 	entry = cpu_to_le32(entry);
 	rdev->gart.table.ram.ptr[i] = entry;
 	return 0;
@@ -226,8 +229,8 @@
 
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		/* read MC_STATUS */
-		tmp = RREG32(0x0150);
-		if (tmp & (1 << 2)) {
+		tmp = RREG32(RADEON_MC_STATUS);
+		if (tmp & RADEON_MC_IDLE) {
 			return 0;
 		}
 		DRM_UDELAY(1);
@@ -241,7 +244,7 @@
 	r420_pipes_init(rdev);
 	if (rs400_mc_wait_for_idle(rdev)) {
 		printk(KERN_WARNING "rs400: Failed to wait MC idle while "
-		       "programming pipes. Bad things might happen. %08x\n", RREG32(0x150));
+		       "programming pipes. Bad things might happen. %08x\n", RREG32(RADEON_MC_STATUS));
 	}
 }
 
@@ -300,9 +303,9 @@
 		seq_printf(m, "MCCFG_AGP_BASE_2 0x%08x\n", tmp);
 		tmp = RREG32_MC(RS690_MCCFG_AGP_LOCATION);
 		seq_printf(m, "MCCFG_AGP_LOCATION 0x%08x\n", tmp);
-		tmp = RREG32_MC(0x100);
+		tmp = RREG32_MC(RS690_MCCFG_FB_LOCATION);
 		seq_printf(m, "MCCFG_FB_LOCATION 0x%08x\n", tmp);
-		tmp = RREG32(0x134);
+		tmp = RREG32(RS690_HDP_FB_LOCATION);
 		seq_printf(m, "HDP_FB_LOCATION 0x%08x\n", tmp);
 	} else {
 		tmp = RREG32(RADEON_AGP_BASE);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 0137d3e..6638c8e 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -77,9 +77,9 @@
 		switch (crev) {
 		case 1:
 			tmp.full = dfixed_const(100);
-			rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock);
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
 			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-			if (info->info.usK8MemoryClock)
+			if (le16_to_cpu(info->info.usK8MemoryClock))
 				rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
 			else if (rdev->clock.default_mclk) {
 				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
@@ -91,16 +91,16 @@
 			break;
 		case 2:
 			tmp.full = dfixed_const(100);
-			rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
+			rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
 			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-			if (info->info_v2.ulBootUpUMAClock)
-				rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
+			if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
+				rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
 			else if (rdev->clock.default_mclk)
 				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
 			else
 				rdev->pm.igp_system_mclk.full = dfixed_const(66700);
 			rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
-			rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
+			rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
 			rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
 			rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
 			break;
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 5d569f4..64b57af 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -69,13 +69,13 @@
 			  ISYNC_CPSCRATCH_IDLEGUI);
 	radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
 	radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
-	radeon_ring_write(rdev, PACKET0(0x170C, 0));
-	radeon_ring_write(rdev, 1 << 31);
+	radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
+	radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
 	radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
 	radeon_ring_write(rdev, 0);
 	radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
 	radeon_ring_write(rdev, 0);
-	radeon_ring_write(rdev, PACKET0(0x42C8, 0));
+	radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
 	radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
 	radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
 	radeon_ring_write(rdev, 0);
@@ -153,8 +153,8 @@
 	}
 	rv515_vga_render_disable(rdev);
 	r420_pipes_init(rdev);
-	gb_pipe_select = RREG32(0x402C);
-	tmp = RREG32(0x170C);
+	gb_pipe_select = RREG32(R400_GB_PIPE_SELECT);
+	tmp = RREG32(R300_DST_PIPE_CONFIG);
 	pipe_select_current = (tmp >> 2) & 3;
 	tmp = (1 << pipe_select_current) |
 	      (((gb_pipe_select >> 8) & 0xF) << 4);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 491dc90..d8ba676 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -78,18 +78,23 @@
 }
 
 /* get temperature in millidegrees */
-u32 rv770_get_temp(struct radeon_device *rdev)
+int rv770_get_temp(struct radeon_device *rdev)
 {
 	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
 		ASIC_T_SHIFT;
-	u32 actual_temp = 0;
+	int actual_temp;
 
-	if ((temp >> 9) & 1)
-		actual_temp = 0;
-	else
-		actual_temp = (temp >> 1) & 0xff;
+	if (temp & 0x400)
+		actual_temp = -256;
+	else if (temp & 0x200)
+		actual_temp = 255;
+	else if (temp & 0x100) {
+		actual_temp = temp & 0x1ff;
+		actual_temp |= ~0x1ff;
+	} else
+		actual_temp = temp & 0xff;
 
-	return actual_temp * 1000;
+	return (actual_temp * 1000) / 2;
 }
 
 void rv770_pm_misc(struct radeon_device *rdev)
@@ -316,7 +321,11 @@
 		return -EINVAL;
 
 	r700_cp_stop(rdev);
-	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
 	/* Reset cp */
 	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
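The reworked rv770_get_temp() treats the thermal status field as a signed reading in 0.5 degree Celsius steps: bit 0x100 marks negative values that get sign-extended from 9 bits, while 0x400 and 0x200 act as underflow/overflow flags clamped to -128 / +127.5 degrees. A standalone sketch of the same decode, with made-up raw values:

#include <stdio.h>

static int decode_millicelsius(unsigned int temp)
{
	int actual_temp;

	if (temp & 0x400)			/* underflow flag */
		actual_temp = -256;
	else if (temp & 0x200)			/* overflow flag */
		actual_temp = 255;
	else if (temp & 0x100) {		/* negative: sign-extend 9 bits */
		actual_temp = temp & 0x1ff;
		actual_temp |= ~0x1ff;
	} else
		actual_temp = temp & 0xff;

	return (actual_temp * 1000) / 2;	/* 0.5 degC per LSB -> millidegrees */
}

int main(void)
{
	printf("%d\n", decode_millicelsius(0x050));	/* 80 * 0.5 degC = 40000 */
	printf("%d\n", decode_millicelsius(0x1f6));	/* -10 * 0.5 degC = -5000 */
	return 0;
}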
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index abc8cf5..79fa588 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -76,10 +76,10 @@
 #define		ROQ_IB1_START(x)				((x) << 0)
 #define		ROQ_IB2_START(x)				((x) << 8)
 #define	CP_RB_CNTL					0xC104
-#define		RB_BUFSZ(x)					((x)<<0)
-#define		RB_BLKSZ(x)					((x)<<8)
-#define		RB_NO_UPDATE					(1<<27)
-#define		RB_RPTR_WR_ENA					(1<<31)
+#define		RB_BUFSZ(x)					((x) << 0)
+#define		RB_BLKSZ(x)					((x) << 8)
+#define		RB_NO_UPDATE					(1 << 27)
+#define		RB_RPTR_WR_ENA					(1 << 31)
 #define		BUF_SWAP_32BIT					(2 << 16)
 #define	CP_RB_RPTR					0x8700
 #define	CP_RB_RPTR_ADDR					0xC10C
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 09aea5f..70e60a4 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -1,11 +1,13 @@
 config STUB_POULSBO
 	tristate "Intel GMA500 Stub Driver"
 	depends on PCI
+	depends on NET # for THERMAL
 	# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
 	select BACKLIGHT_CLASS_DEVICE if ACPI
 	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
+	select THERMAL if ACPI
 	help
 	  Choose this option if you have a system that has Intel GMA500
 	  (Poulsbo) integrated graphics. If M is selected, the module will
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 8d0e31a..96c83a9 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -1,5 +1,5 @@
 config VGA_ARB
-	bool "VGA Arbitration" if EMBEDDED
+	bool "VGA Arbitration" if EXPERT
 	default y
 	depends on PCI
 	help
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index c380c65..ace2b16 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -636,7 +636,7 @@
 			void (*irq_set_state)(void *cookie, bool state),
 			unsigned int (*set_vga_decode)(void *cookie, bool decode))
 {
-	int ret = -1;
+	int ret = -ENODEV;
 	struct vga_device *vgadev;
 	unsigned long flags;
 
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 24cca2f..2560f01 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -62,9 +62,9 @@
 	Support for 3M PCT touch screens.
 
 config HID_A4TECH
-	tristate "A4 tech mice" if EMBEDDED
+	tristate "A4 tech mice" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for A4 tech X5 and WOP-35 / Trust 450L mice.
 
@@ -77,9 +77,9 @@
 	game controllers.
 
 config HID_APPLE
-	tristate "Apple {i,Power,Mac}Books" if EMBEDDED
+	tristate "Apple {i,Power,Mac}Books" if EXPERT
 	depends on (USB_HID || BT_HIDP)
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for some Apple devices which less or more break
 	HID specification.
@@ -88,9 +88,9 @@
 	MacBooks, MacBook Pros and Apple Aluminum.
 
 config HID_BELKIN
-	tristate "Belkin Flip KVM and Wireless keyboard" if EMBEDDED
+	tristate "Belkin Flip KVM and Wireless keyboard" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Belkin Flip KVM and Wireless keyboard.
 
@@ -101,16 +101,16 @@
 	Support for Cando dual touch panel.
 
 config HID_CHERRY
-	tristate "Cherry Cymotion keyboard" if EMBEDDED
+	tristate "Cherry Cymotion keyboard" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Cherry Cymotion keyboard.
 
 config HID_CHICONY
-	tristate "Chicony Tactical pad" if EMBEDDED
+	tristate "Chicony Tactical pad" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Chicony Tactical pad.
 
@@ -130,9 +130,9 @@
 	  and some additional multimedia keys.
 
 config HID_CYPRESS
-	tristate "Cypress mouse and barcode readers" if EMBEDDED
+	tristate "Cypress mouse and barcode readers" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for cypress mouse and barcode readers.
 
@@ -174,16 +174,16 @@
 	Support for the ELECOM BM084 (bluetooth mouse).
 
 config HID_EZKEY
-	tristate "Ezkey BTC 8193 keyboard" if EMBEDDED
+	tristate "Ezkey BTC 8193 keyboard" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Ezkey BTC 8193 keyboard.
 
 config HID_KYE
-	tristate "Kye/Genius Ergo Mouse" if EMBEDDED
+	tristate "Kye/Genius Ergo Mouse" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Kye/Genius Ergo Mouse.
 
@@ -212,16 +212,16 @@
 	Support for Twinhan IR remote control.
 
 config HID_KENSINGTON
-	tristate "Kensington Slimblade Trackball" if EMBEDDED
+	tristate "Kensington Slimblade Trackball" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Kensington Slimblade Trackball.
 
 config HID_LOGITECH
-	tristate "Logitech devices" if EMBEDDED
+	tristate "Logitech devices" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Logitech devices that are not fully compliant with HID standard.
 
@@ -276,9 +276,9 @@
 	Apple Wireless "Magic" Mouse.
 
 config HID_MICROSOFT
-	tristate "Microsoft non-fully HID-compliant devices" if EMBEDDED
+	tristate "Microsoft non-fully HID-compliant devices" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Microsoft devices that are not fully compliant with HID standard.
 
@@ -289,9 +289,9 @@
 	Support for MosArt dual-touch panels.
 
 config HID_MONTEREY
-	tristate "Monterey Genius KB29E keyboard" if EMBEDDED
+	tristate "Monterey Genius KB29E keyboard" if EXPERT
 	depends on USB_HID
-	default !EMBEDDED
+	default !EXPERT
 	---help---
 	Support for Monterey Genius KB29E.
 
@@ -365,8 +365,8 @@
 	  - IR
 
 config HID_PICOLCD_FB
-	bool "Framebuffer support" if EMBEDDED
-	default !EMBEDDED
+	bool "Framebuffer support" if EXPERT
+	default !EXPERT
 	depends on HID_PICOLCD
 	depends on HID_PICOLCD=FB || FB=y
 	select FB_DEFERRED_IO
@@ -379,8 +379,8 @@
 	  frambuffer device.
 
 config HID_PICOLCD_BACKLIGHT
-	bool "Backlight control" if EMBEDDED
-	default !EMBEDDED
+	bool "Backlight control" if EXPERT
+	default !EXPERT
 	depends on HID_PICOLCD
 	depends on HID_PICOLCD=BACKLIGHT_CLASS_DEVICE || BACKLIGHT_CLASS_DEVICE=y
 	---help---
@@ -388,16 +388,16 @@
 	  class.
 
 config HID_PICOLCD_LCD
-	bool "Contrast control" if EMBEDDED
-	default !EMBEDDED
+	bool "Contrast control" if EXPERT
+	default !EXPERT
 	depends on HID_PICOLCD
 	depends on HID_PICOLCD=LCD_CLASS_DEVICE || LCD_CLASS_DEVICE=y
 	---help---
 	  Provide access to PicoLCD's LCD contrast via lcd class.
 
 config HID_PICOLCD_LEDS
-	bool "GPO via leds class" if EMBEDDED
-	default !EMBEDDED
+	bool "GPO via leds class" if EXPERT
+	default !EXPERT
 	depends on HID_PICOLCD
 	depends on HID_PICOLCD=LEDS_CLASS || LEDS_CLASS=y
 	---help---
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index 4edb3be..0f20fd1 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -45,7 +45,7 @@
 	  If unsure, say Y.
 
 menu "USB HID Boot Protocol drivers"
-	depends on USB!=n && USB_HID!=y && EMBEDDED
+	depends on USB!=n && USB_HID!=y && EXPERT
 
 config USB_KBD
 	tristate "USB HIDBP Keyboard (simple Boot) support"
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index ce0372f..4c07436 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -1072,6 +1072,7 @@
 			node->sda.dev_attr.show = grp->show;
 			node->sda.dev_attr.store = grp->store;
 			attr = &node->sda.dev_attr.attr;
+			sysfs_attr_init(attr);
 			attr->name = node->name;
 			attr->mode = S_IRUGO | (grp->store ? S_IWUSR : 0);
 			ret = sysfs_create_file(&pdev->dev.kobj, attr);
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 2d68cf3..b5e8920 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -13,6 +13,7 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/dmi.h>
 
 #include <acpi/acpi.h>
 #include <acpi/acpixf.h>
@@ -22,6 +23,21 @@
 
 #define ATK_HID "ATK0110"
 
+static bool new_if;
+module_param(new_if, bool, 0);
+MODULE_PARM_DESC(new_if, "Override detection heuristic and force the use of the new ATK0110 interface");
+
+static const struct dmi_system_id __initconst atk_force_new_if[] = {
+	{
+		/* Old interface has broken MCH temp monitoring */
+		.ident = "Asus Sabertooth X58",
+		.matches = {
+			DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58")
+		}
+	},
+	{ }
+};
+
 /* Minimum time between readings, enforced in order to avoid
  * hogging the CPU.
  */
@@ -1302,7 +1318,9 @@
 	 * analysis of multiple DSDTs indicates that when both interfaces
 	 * are present the new one (GGRP/GITM) is not functional.
 	 */
-	if (data->rtmp_handle && data->rvlt_handle && data->rfan_handle)
+	if (new_if)
+		dev_info(dev, "Overriding interface detection\n");
+	if (data->rtmp_handle && data->rvlt_handle && data->rfan_handle && !new_if)
 		data->old_interface = true;
 	else if (data->enumerate_handle && data->read_handle &&
 			data->write_handle)
@@ -1420,6 +1438,9 @@
 		return -EBUSY;
 	}
 
+	if (dmi_check_system(atk_force_new_if))
+		new_if = true;
+
 	ret = acpi_bus_register_driver(&atk_driver);
 	if (ret)
 		pr_info("acpi_bus_register_driver failed: %d\n", ret);
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 5dea9fa..cd2a6e4 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -344,7 +344,7 @@
 }
 
 static const unsigned short emc1403_address_list[] = {
-	0x18, 0x2a, 0x4c, 0x4d, I2C_CLIENT_END
+	0x18, 0x29, 0x4c, 0x4d, I2C_CLIENT_END
 };
 
 static const struct i2c_device_id emc1403_idtable[] = {
diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
index 1b674b7..d805e8e 100644
--- a/drivers/hwmon/lis3lv02d.c
+++ b/drivers/hwmon/lis3lv02d.c
@@ -957,7 +957,7 @@
 
 	/* bail if we did not get an IRQ from the bus layer */
 	if (!dev->irq) {
-		pr_err("No IRQ. Disabling /dev/freefall\n");
+		pr_debug("No IRQ. Disabling /dev/freefall\n");
 		goto out;
 	}
 
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 776aeb3..508cb29 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -98,6 +98,9 @@
  * value, it uses signed 8-bit values with LSB = 1 degree Celsius.
  * For remote temperature, low and high limits, it uses signed 11-bit values
  * with LSB = 0.125 degree Celsius, left-justified in 16-bit registers.
+ * For the LM64 the actual remote diode temperature is 16 degrees Celsius
+ * higher than the register reading. Remote temperature setpoints have to
+ * be adapted accordingly.
  */
 
 #define FAN_FROM_REG(reg)	((reg) == 0xFFFC || (reg) == 0 ? 0 : \
@@ -165,6 +168,8 @@
 	struct mutex update_lock;
 	char valid; /* zero until following fields are valid */
 	unsigned long last_updated; /* in jiffies */
+	int kind;
+	int temp2_offset;
 
 	/* registers values */
 	u8 config, config_fan;
@@ -247,16 +252,34 @@
 	return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2);
 }
 
-static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
-			  char *buf)
+/*
+ * There are 8-bit registers for both the local (temp1) and remote (temp2)
+ * sensors. For the remote sensor registers temp2_offset has to be applied;
+ * for the local sensor it must not be.
+ * So we need separate 8-bit accessors for the local and remote sensors.
+ */
+static ssize_t show_local_temp8(struct device *dev,
+				struct device_attribute *devattr,
+				char *buf)
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
 	struct lm63_data *data = lm63_update_device(dev);
 	return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index]));
 }
 
-static ssize_t set_temp8(struct device *dev, struct device_attribute *dummy,
-			 const char *buf, size_t count)
+static ssize_t show_remote_temp8(struct device *dev,
+				 struct device_attribute *devattr,
+				 char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct lm63_data *data = lm63_update_device(dev);
+	return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])
+		       + data->temp2_offset);
+}
+
+static ssize_t set_local_temp8(struct device *dev,
+			       struct device_attribute *dummy,
+			       const char *buf, size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct lm63_data *data = i2c_get_clientdata(client);
@@ -274,7 +297,8 @@
 {
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
 	struct lm63_data *data = lm63_update_device(dev);
-	return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index]));
+	return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])
+		       + data->temp2_offset);
 }
 
 static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
@@ -294,7 +318,7 @@
 	int nr = attr->index;
 
 	mutex_lock(&data->update_lock);
-	data->temp11[nr] = TEMP11_TO_REG(val);
+	data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
 	i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2],
 				  data->temp11[nr] >> 8);
 	i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1],
@@ -310,6 +334,7 @@
 {
 	struct lm63_data *data = lm63_update_device(dev);
 	return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2])
+		       + data->temp2_offset
 		       - TEMP8_FROM_REG(data->temp2_crit_hyst));
 }
 
@@ -324,7 +349,7 @@
 	long hyst;
 
 	mutex_lock(&data->update_lock);
-	hyst = TEMP8_FROM_REG(data->temp8[2]) - val;
+	hyst = TEMP8_FROM_REG(data->temp8[2]) + data->temp2_offset - val;
 	i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST,
 				  HYST_TO_REG(hyst));
 	mutex_unlock(&data->update_lock);
@@ -355,16 +380,21 @@
 static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1);
 static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL);
 
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp8, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp8,
-	set_temp8, 1);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8,
+	set_local_temp8, 1);
 
 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
 	set_temp11, 1);
 static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
 	set_temp11, 2);
-static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp8, NULL, 2);
+/*
+ * On the LM63, temp2_crit can be set only once, which should be the
+ * job of the bootloader.
+ */
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8,
+	NULL, 2);
 static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
 	set_temp2_crit_hyst);
 
@@ -479,7 +509,12 @@
 	data->valid = 0;
 	mutex_init(&data->update_lock);
 
-	/* Initialize the LM63 chip */
+	/* Set the device type */
+	data->kind = id->driver_data;
+	if (data->kind == lm64)
+		data->temp2_offset = 16000;
+
+	/* Initialize chip */
 	lm63_init_client(new_client);
 
 	/* Register sysfs hooks */
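A worked sketch of the LM64 offset arithmetic the hunks above introduce, based only on the register description in the comment near the top of the file (signed 11-bit values, 0.125 degrees Celsius per LSB, 16000 millidegrees offset); the values and names are illustrative, not the driver's internal macros:

#include <stdio.h>

int main(void)
{
	int raw = 0x3200;		/* left-justified 16-bit register value */
	int count = raw / 32;		/* signed 11-bit count, LSB = 0.125 degC */
	long mc = (long)count * 125;	/* chip reading in millidegrees: 50000 */
	long temp2_offset = 16000;	/* LM64: diode runs 16 degC above the reading */

	/* what show_temp11() reports to user space */
	printf("temp2_input = %ld\n", mc + temp2_offset);	/* 66000 */

	/* set_temp11() goes the other way: subtract the offset before encoding */
	long limit = 70000;
	printf("register gets %ld millidegrees\n", limit - temp2_offset);	/* 54000 */

	return 0;
}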
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 98ccfeb..9827c5e 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -134,7 +134,7 @@
 	  module will be called ide-cd.
 
 config BLK_DEV_IDECD_VERBOSE_ERRORS
-	bool "Verbose error logging for IDE/ATAPI CDROM driver" if EMBEDDED
+	bool "Verbose error logging for IDE/ATAPI CDROM driver" if EXPERT
 	depends on BLK_DEV_IDECD
 	default y
 	help
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 7acb32e..1fa091e 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -263,7 +263,7 @@
 	clockevents_notify(reason, &cpu);
 }
 
-static int __cpuinit setup_broadcast_cpuhp_notify(struct notifier_block *n,
+static int setup_broadcast_cpuhp_notify(struct notifier_block *n,
 		unsigned long action, void *hcpu)
 {
 	int hotcpu = (unsigned long)hcpu;
@@ -273,15 +273,11 @@
 		smp_call_function_single(hotcpu, __setup_broadcast_timer,
 			(void *)true, 1);
 		break;
-	case CPU_DOWN_PREPARE:
-		smp_call_function_single(hotcpu, __setup_broadcast_timer,
-			(void *)false, 1);
-		break;
 	}
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata setup_broadcast_notifier = {
+static struct notifier_block setup_broadcast_notifier = {
 	.notifier_call = setup_broadcast_cpuhp_notify,
 };
 
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index e38be1b..fbbfa24 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1079,7 +1079,7 @@
 
 	ib_unregister_event_handler(&sa_dev->event_handler);
 
-	flush_scheduled_work();
+	flush_workqueue(ib_wq);
 
 	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
 		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ca12acf..ec1e9da 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -636,6 +636,16 @@
 	}
 }
 
+static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
+			       struct rdma_route *route)
+{
+	struct rdma_dev_addr *dev_addr;
+
+	dev_addr = &route->addr.dev_addr;
+	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
+	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
+}
+
 static ssize_t ucma_query_route(struct ucma_file *file,
 				const char __user *inbuf,
 				int in_len, int out_len)
@@ -670,8 +680,10 @@
 
 	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
 	resp.port_num = ctx->cm_id->port_num;
-	if (rdma_node_get_transport(ctx->cm_id->device->node_type) == RDMA_TRANSPORT_IB) {
-		switch (rdma_port_get_link_layer(ctx->cm_id->device, ctx->cm_id->port_num)) {
+	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
+	case RDMA_TRANSPORT_IB:
+		switch (rdma_port_get_link_layer(ctx->cm_id->device,
+			ctx->cm_id->port_num)) {
 		case IB_LINK_LAYER_INFINIBAND:
 			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
 			break;
@@ -681,6 +693,12 @@
 		default:
 			break;
 		}
+		break;
+	case RDMA_TRANSPORT_IWARP:
+		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
+		break;
+	default:
+		break;
 	}
 
 out:
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c
index 9ce7819b..2ec716f 100644
--- a/drivers/infiniband/hw/amso1100/c2_vq.c
+++ b/drivers/infiniband/hw/amso1100/c2_vq.c
@@ -107,7 +107,7 @@
 	r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
 	if (r) {
 		init_waitqueue_head(&r->wait_object);
-		r->reply_msg = (u64) NULL;
+		r->reply_msg = 0;
 		r->event = 0;
 		r->cm_id = NULL;
 		r->qp = NULL;
@@ -123,7 +123,7 @@
  */
 void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
 {
-	r->reply_msg = (u64) NULL;
+	r->reply_msg = 0;
 	if (atomic_dec_and_test(&r->refcnt)) {
 		kfree(r);
 	}
@@ -151,7 +151,7 @@
 void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
 {
 	if (atomic_dec_and_test(&r->refcnt)) {
-		if (r->reply_msg != (u64) NULL)
+		if (r->reply_msg != 0)
 			vq_repbuf_free(c2dev,
 				       (void *) (unsigned long) r->reply_msg);
 		kfree(r);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0dc62b1..8b00e6c 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -380,7 +380,7 @@
 					  16)) | FW_WR_FLOWID(ep->hwtid));
 
 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
-	flowc->mnemval[0].val = cpu_to_be32(0);
+	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
 	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
 	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
 	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 2080090..4f0be25 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -220,7 +220,7 @@
 		V_FW_RI_RES_WR_DCAEN(0) |
 		V_FW_RI_RES_WR_DCACPU(0) |
 		V_FW_RI_RES_WR_FBMIN(2) |
-		V_FW_RI_RES_WR_FBMAX(3) |
+		V_FW_RI_RES_WR_FBMAX(2) |
 		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
 		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
 		V_FW_RI_RES_WR_EQSIZE(eqsize));
@@ -243,7 +243,7 @@
 		V_FW_RI_RES_WR_DCAEN(0) |
 		V_FW_RI_RES_WR_DCACPU(0) |
 		V_FW_RI_RES_WR_FBMIN(2) |
-		V_FW_RI_RES_WR_FBMAX(3) |
+		V_FW_RI_RES_WR_FBMAX(2) |
 		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
 		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
 		V_FW_RI_RES_WR_EQSIZE(eqsize));
diff --git a/drivers/infiniband/hw/mthca/Kconfig b/drivers/infiniband/hw/mthca/Kconfig
index 03efc07..da314c3 100644
--- a/drivers/infiniband/hw/mthca/Kconfig
+++ b/drivers/infiniband/hw/mthca/Kconfig
@@ -7,7 +7,7 @@
 	  ("Tavor") and the MT25208 PCI Express HCA ("Arbel").
 
 config INFINIBAND_MTHCA_DEBUG
-	bool "Verbose debugging output" if EMBEDDED
+	bool "Verbose debugging output" if EXPERT
 	depends on INFINIBAND_MTHCA
 	default y
 	---help---
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 3b4ec32..3d7f366 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -153,7 +153,8 @@
 				nesdev, nesdev->netdev[0]->name);
 		netdev = nesdev->netdev[0];
 		nesvnic = netdev_priv(netdev);
-		is_bonded = (netdev->master == event_netdev);
+		is_bonded = netif_is_bond_slave(netdev) &&
+			    (netdev->master == event_netdev);
 		if ((netdev == event_netdev) || is_bonded) {
 			if (nesvnic->rdma_enabled == 0) {
 				nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since"
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 009ec81..ec3aa11 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1118,7 +1118,7 @@
 		return rc;
 	}
 
-	if (nesvnic->netdev->master)
+	if (netif_is_bond_slave(netdev))
 		netdev = nesvnic->netdev->master;
 	else
 		netdev = nesvnic->netdev;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8b606fd..08c1948 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2610,9 +2610,11 @@
 					netif_carrier_on(nesvnic->netdev);
 
 					spin_lock(&nesvnic->port_ibevent_lock);
-					if (nesdev->iw_status == 0) {
-						nesdev->iw_status = 1;
-						nes_port_ibevent(nesvnic);
+					if (nesvnic->of_device_registered) {
+						if (nesdev->iw_status == 0) {
+							nesdev->iw_status = 1;
+							nes_port_ibevent(nesvnic);
+						}
 					}
 					spin_unlock(&nesvnic->port_ibevent_lock);
 				}
@@ -2642,9 +2644,11 @@
 					netif_carrier_off(nesvnic->netdev);
 
 					spin_lock(&nesvnic->port_ibevent_lock);
-					if (nesdev->iw_status == 1) {
-						nesdev->iw_status = 0;
-						nes_port_ibevent(nesvnic);
+					if (nesvnic->of_device_registered) {
+						if (nesdev->iw_status == 1) {
+							nesdev->iw_status = 0;
+							nes_port_ibevent(nesvnic);
+						}
 					}
 					spin_unlock(&nesvnic->port_ibevent_lock);
 				}
@@ -2703,9 +2707,11 @@
 				netif_carrier_on(nesvnic->netdev);
 
 				spin_lock(&nesvnic->port_ibevent_lock);
-				if (nesdev->iw_status == 0) {
-					nesdev->iw_status = 1;
-					nes_port_ibevent(nesvnic);
+				if (nesvnic->of_device_registered) {
+					if (nesdev->iw_status == 0) {
+						nesdev->iw_status = 1;
+						nes_port_ibevent(nesvnic);
+					}
 				}
 				spin_unlock(&nesvnic->port_ibevent_lock);
 			}
@@ -2723,9 +2729,11 @@
 				netif_carrier_off(nesvnic->netdev);
 
 				spin_lock(&nesvnic->port_ibevent_lock);
-				if (nesdev->iw_status == 1) {
-					nesdev->iw_status = 0;
-					nes_port_ibevent(nesvnic);
+				if (nesvnic->of_device_registered) {
+					if (nesdev->iw_status == 1) {
+						nesdev->iw_status = 0;
+						nes_port_ibevent(nesvnic);
+					}
 				}
 				spin_unlock(&nesvnic->port_ibevent_lock);
 			}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 50cceb3..b01809a 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -623,7 +623,6 @@
 	u8 ibmalfusesnap;
 	struct qib_qsfp_data qsfp_data;
 	char epmsgbuf[192]; /* for port error interrupt msg buffer */
-	u8 bounced;
 };
 
 static struct {
@@ -1881,23 +1880,7 @@
 		    IB_PHYSPORTSTATE_DISABLED)
 			qib_set_ib_7322_lstate(ppd, 0,
 			       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-		else {
-			u32 lstate;
-			/*
-			 * We need the current logical link state before
-			 * lflags are set in handle_e_ibstatuschanged.
-			 */
-			lstate = qib_7322_iblink_state(ibcs);
-
-			if (IS_QMH(dd) && !ppd->cpspec->bounced &&
-			    ltstate == IB_PHYSPORTSTATE_LINKUP &&
-			    (lstate >= IB_PORT_INIT &&
-				lstate <= IB_PORT_ACTIVE)) {
-				ppd->cpspec->bounced = 1;
-				qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
-					IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
-			}
-
+		else
 			/*
 			 * Since going into a recovery state causes the link
 			 * state to go down and since recovery is transitory,
@@ -1911,7 +1894,6 @@
 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
 				qib_handle_e_ibstatuschanged(ppd, ibcs);
-		}
 	}
 	if (*msg && iserr)
 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
@@ -2381,6 +2363,11 @@
 	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
 
+	/* Hold the link state machine for mezz boards */
+	if (IS_QMH(dd) || IS_QME(dd))
+		qib_set_ib_7322_lstate(ppd, 0,
+				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
 	/* Also enable IBSTATUSCHG interrupt.  */
 	val = qib_read_kreg_port(ppd, krp_errmask);
 	qib_write_kreg_port(ppd, krp_errmask,
@@ -5702,6 +5689,11 @@
 				ppd->cpspec->h1_val = h1;
 			/* now change the IBC and serdes, overriding generic */
 			init_txdds_table(ppd, 1);
+			/* Re-enable the physical state machine on mezz boards
+			 * now that the correct settings have been set. */
+			if (IS_QMH(dd) || IS_QME(dd))
+				qib_set_ib_7322_lstate(ppd, 0,
+					    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
 			any++;
 		}
 		if (*nxt == '\n')
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 8245237..eca0c41 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1005,7 +1005,8 @@
 	 * there are still requests that haven't been acked.
 	 */
 	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
-	    !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
+	    !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
+	    (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
 		start_timer(qp);
 
 	while (qp->s_last != qp->s_acked) {
@@ -1439,6 +1440,8 @@
 	}
 
 	spin_lock_irqsave(&qp->s_lock, flags);
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+		goto ack_done;
 
 	/* Ignore invalid responses. */
 	if (qib_cmp24(psn, qp->s_next_psn) >= 0)
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 55855ee..cda8eac 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -24,7 +24,7 @@
 	  unless you limit mtu for these destinations to 2044.
 
 config INFINIBAND_IPOIB_DEBUG
-	bool "IP-over-InfiniBand debugging" if EMBEDDED
+	bool "IP-over-InfiniBand debugging" if EXPERT
 	depends on INFINIBAND_IPOIB
 	default y
 	---help---
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 07c2cd4..1903c0f 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -6,7 +6,7 @@
 	depends on !S390
 
 config INPUT
-	tristate "Generic input layer (needed for keyboard, mouse, ...)" if EMBEDDED
+	tristate "Generic input layer (needed for keyboard, mouse, ...)" if EXPERT
 	default y
 	help
 	  Say Y here if you have any input device (mouse, keyboard, tablet,
@@ -67,7 +67,7 @@
 comment "Userland interfaces"
 
 config INPUT_MOUSEDEV
-	tristate "Mouse interface" if EMBEDDED
+	tristate "Mouse interface" if EXPERT
 	default y
 	help
 	  Say Y here if you want your mouse to be accessible as char devices
@@ -150,7 +150,7 @@
 	  module will be called evbug.
 
 config INPUT_APMPOWER
-	tristate "Input Power Event -> APM Bridge" if EMBEDDED
+	tristate "Input Power Event -> APM Bridge" if EXPERT
 	depends on INPUT && APM_EMULATION
 	help
 	  Say Y here if you want suspend key events to trigger a user
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 7985114..11905b6 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -75,7 +75,6 @@
  * dev->event_lock held and interrupts disabled.
  */
 static void input_pass_event(struct input_dev *dev,
-			     struct input_handler *src_handler,
 			     unsigned int type, unsigned int code, int value)
 {
 	struct input_handler *handler;
@@ -94,15 +93,6 @@
 				continue;
 
 			handler = handle->handler;
-
-			/*
-			 * If this is the handler that injected this
-			 * particular event we want to skip it to avoid
-			 * filters firing again and again.
-			 */
-			if (handler == src_handler)
-				continue;
-
 			if (!handler->filter) {
 				if (filtered)
 					break;
@@ -132,7 +122,7 @@
 	if (test_bit(dev->repeat_key, dev->key) &&
 	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
 
-		input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2);
+		input_pass_event(dev, EV_KEY, dev->repeat_key, 2);
 
 		if (dev->sync) {
 			/*
@@ -141,7 +131,7 @@
 			 * Otherwise assume that the driver will send
 			 * SYN_REPORT once it's done.
 			 */
-			input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
 		}
 
 		if (dev->rep[REP_PERIOD])
@@ -174,7 +164,6 @@
 #define INPUT_PASS_TO_ALL	(INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
 
 static int input_handle_abs_event(struct input_dev *dev,
-				  struct input_handler *src_handler,
 				  unsigned int code, int *pval)
 {
 	bool is_mt_event;
@@ -218,15 +207,13 @@
 	/* Flush pending "slot" event */
 	if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
 		input_abs_set_val(dev, ABS_MT_SLOT, dev->slot);
-		input_pass_event(dev, src_handler,
-				 EV_ABS, ABS_MT_SLOT, dev->slot);
+		input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot);
 	}
 
 	return INPUT_PASS_TO_HANDLERS;
 }
 
 static void input_handle_event(struct input_dev *dev,
-			       struct input_handler *src_handler,
 			       unsigned int type, unsigned int code, int value)
 {
 	int disposition = INPUT_IGNORE_EVENT;
@@ -279,8 +266,7 @@
 
 	case EV_ABS:
 		if (is_event_supported(code, dev->absbit, ABS_MAX))
-			disposition = input_handle_abs_event(dev, src_handler,
-							     code, &value);
+			disposition = input_handle_abs_event(dev, code, &value);
 
 		break;
 
@@ -338,7 +324,7 @@
 		dev->event(dev, type, code, value);
 
 	if (disposition & INPUT_PASS_TO_HANDLERS)
-		input_pass_event(dev, src_handler, type, code, value);
+		input_pass_event(dev, type, code, value);
 }
 
 /**
@@ -367,7 +353,7 @@
 
 		spin_lock_irqsave(&dev->event_lock, flags);
 		add_input_randomness(type, code, value);
-		input_handle_event(dev, NULL, type, code, value);
+		input_handle_event(dev, type, code, value);
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 }
@@ -397,8 +383,7 @@
 		rcu_read_lock();
 		grab = rcu_dereference(dev->grab);
 		if (!grab || grab == handle)
-			input_handle_event(dev, handle->handler,
-					   type, code, value);
+			input_handle_event(dev, type, code, value);
 		rcu_read_unlock();
 
 		spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -611,10 +596,10 @@
 		for (code = 0; code <= KEY_MAX; code++) {
 			if (is_event_supported(code, dev->keybit, KEY_MAX) &&
 			    __test_and_clear_bit(code, dev->key)) {
-				input_pass_event(dev, NULL, EV_KEY, code, 0);
+				input_pass_event(dev, EV_KEY, code, 0);
 			}
 		}
-		input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+		input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
 	}
 }
 
@@ -889,9 +874,9 @@
 	    !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
 	    __test_and_clear_bit(old_keycode, dev->key)) {
 
-		input_pass_event(dev, NULL, EV_KEY, old_keycode, 0);
+		input_pass_event(dev, EV_KEY, old_keycode, 0);
 		if (dev->sync)
-			input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
 	}
 
  out:
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 7b3c0b8..c7a9202 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -2,7 +2,7 @@
 # Input core configuration
 #
 menuconfig INPUT_KEYBOARD
-	bool "Keyboards" if EMBEDDED || !X86
+	bool "Keyboards" if EXPERT || !X86
 	default y
 	help
 	  Say Y here, and a list of supported keyboards will be displayed.
@@ -57,7 +57,7 @@
 	  module will be called atakbd.
 
 config KEYBOARD_ATKBD
-	tristate "AT keyboard" if EMBEDDED || !X86
+	tristate "AT keyboard" if EXPERT || !X86
 	default y
 	select SERIO
 	select SERIO_LIBPS2
@@ -343,6 +343,16 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called nmk-ske-keypad.
 
+config KEYBOARD_TEGRA
+	tristate "NVIDIA Tegra internal matrix keyboard controller support"
+	depends on ARCH_TEGRA
+	help
+	  Say Y here if you want to use a matrix keyboard connected directly
+	  to the internal keyboard controller on Tegra SoCs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called tegra-kbc.
+
 config KEYBOARD_OPENCORES
 	tristate "OpenCores Keyboard Controller"
 	help
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 4e5571b..468c627 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -42,6 +42,7 @@
 obj-$(CONFIG_KEYBOARD_STOWAWAY)		+= stowaway.o
 obj-$(CONFIG_KEYBOARD_SUNKBD)		+= sunkbd.o
 obj-$(CONFIG_KEYBOARD_TC3589X)		+= tc3589x-keypad.o
+obj-$(CONFIG_KEYBOARD_TEGRA)		+= tegra-kbc.o
 obj-$(CONFIG_KEYBOARD_TNETV107X)	+= tnetv107x-keypad.o
 obj-$(CONFIG_KEYBOARD_TWL4030)		+= twl4030_keypad.o
 obj-$(CONFIG_KEYBOARD_XTKBD)		+= xtkbd.o
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 6069abe..eb30063 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -322,7 +322,7 @@
 	struct gpio_keys_button *button = bdata->button;
 	struct input_dev *input = bdata->input;
 	unsigned int type = button->type ?: EV_KEY;
-	int state = (gpio_get_value(button->gpio) ? 1 : 0) ^ button->active_low;
+	int state = (gpio_get_value_cansleep(button->gpio) ? 1 : 0) ^ button->active_low;
 
 	input_event(input, type, button->code, !!state);
 	input_sync(input);
@@ -410,8 +410,8 @@
 	if (!button->can_disable)
 		irqflags |= IRQF_SHARED;
 
-	error = request_irq(irq, gpio_keys_isr, irqflags, desc, bdata);
-	if (error) {
+	error = request_any_context_irq(irq, gpio_keys_isr, irqflags, desc, bdata);
+	if (error < 0) {
 		dev_err(dev, "Unable to claim irq %d; error %d\n",
 			irq, error);
 		goto fail3;
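The gpio_keys change relies on request_any_context_irq() returning a negative errno on failure and a positive IRQC_IS_HARDIRQ or IRQC_IS_NESTED value on success, which is why the test becomes "error < 0". A minimal sketch of that calling convention; the handler and cookie names are placeholders:

#include <linux/interrupt.h>

static irqreturn_t my_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_setup_irq(int irq, void *cookie)
{
	int ret;

	/* may return IRQC_IS_HARDIRQ or IRQC_IS_NESTED on success */
	ret = request_any_context_irq(irq, my_isr,
				      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				      "my-button", cookie);
	if (ret < 0)		/* only negative values are errors */
		return ret;

	return 0;
}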
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
new file mode 100644
index 0000000..ac471b7
--- /dev/null
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -0,0 +1,727 @@
+/*
+ * Keyboard class input driver for the NVIDIA Tegra SoC internal matrix
+ * keyboard controller
+ *
+ * Copyright (c) 2009-2011, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <mach/clk.h>
+#include <mach/kbc.h>
+
+#define KBC_MAX_DEBOUNCE_CNT	0x3ffu
+
+/* KBC row scan time and delay for beginning the row scan. */
+#define KBC_ROW_SCAN_TIME	16
+#define KBC_ROW_SCAN_DLY	5
+
+/* KBC uses a 32 kHz clock, so one cycle = 1/32 kHz */
+#define KBC_CYCLE_USEC	32
+
+/* KBC Registers */
+
+/* KBC Control Register */
+#define KBC_CONTROL_0	0x0
+#define KBC_FIFO_TH_CNT_SHIFT(cnt)	(cnt << 14)
+#define KBC_DEBOUNCE_CNT_SHIFT(cnt)	(cnt << 4)
+#define KBC_CONTROL_FIFO_CNT_INT_EN	(1 << 3)
+#define KBC_CONTROL_KBC_EN		(1 << 0)
+
+/* KBC Interrupt Register */
+#define KBC_INT_0	0x4
+#define KBC_INT_FIFO_CNT_INT_STATUS	(1 << 2)
+
+#define KBC_ROW_CFG0_0	0x8
+#define KBC_COL_CFG0_0	0x18
+#define KBC_INIT_DLY_0	0x28
+#define KBC_RPT_DLY_0	0x2c
+#define KBC_KP_ENT0_0	0x30
+#define KBC_KP_ENT1_0	0x34
+#define KBC_ROW0_MASK_0	0x38
+
+#define KBC_ROW_SHIFT	3
+
+struct tegra_kbc {
+	void __iomem *mmio;
+	struct input_dev *idev;
+	unsigned int irq;
+	unsigned int wake_enable_rows;
+	unsigned int wake_enable_cols;
+	spinlock_t lock;
+	unsigned int repoll_dly;
+	unsigned long cp_dly_jiffies;
+	const struct tegra_kbc_platform_data *pdata;
+	unsigned short keycode[KBC_MAX_KEY];
+	unsigned short current_keys[KBC_MAX_KPENT];
+	unsigned int num_pressed_keys;
+	struct timer_list timer;
+	struct clk *clk;
+};
+
+static const u32 tegra_kbc_default_keymap[] = {
+	KEY(0, 2, KEY_W),
+	KEY(0, 3, KEY_S),
+	KEY(0, 4, KEY_A),
+	KEY(0, 5, KEY_Z),
+	KEY(0, 7, KEY_FN),
+
+	KEY(1, 7, KEY_LEFTMETA),
+
+	KEY(2, 6, KEY_RIGHTALT),
+	KEY(2, 7, KEY_LEFTALT),
+
+	KEY(3, 0, KEY_5),
+	KEY(3, 1, KEY_4),
+	KEY(3, 2, KEY_R),
+	KEY(3, 3, KEY_E),
+	KEY(3, 4, KEY_F),
+	KEY(3, 5, KEY_D),
+	KEY(3, 6, KEY_X),
+
+	KEY(4, 0, KEY_7),
+	KEY(4, 1, KEY_6),
+	KEY(4, 2, KEY_T),
+	KEY(4, 3, KEY_H),
+	KEY(4, 4, KEY_G),
+	KEY(4, 5, KEY_V),
+	KEY(4, 6, KEY_C),
+	KEY(4, 7, KEY_SPACE),
+
+	KEY(5, 0, KEY_9),
+	KEY(5, 1, KEY_8),
+	KEY(5, 2, KEY_U),
+	KEY(5, 3, KEY_Y),
+	KEY(5, 4, KEY_J),
+	KEY(5, 5, KEY_N),
+	KEY(5, 6, KEY_B),
+	KEY(5, 7, KEY_BACKSLASH),
+
+	KEY(6, 0, KEY_MINUS),
+	KEY(6, 1, KEY_0),
+	KEY(6, 2, KEY_O),
+	KEY(6, 3, KEY_I),
+	KEY(6, 4, KEY_L),
+	KEY(6, 5, KEY_K),
+	KEY(6, 6, KEY_COMMA),
+	KEY(6, 7, KEY_M),
+
+	KEY(7, 1, KEY_EQUAL),
+	KEY(7, 2, KEY_RIGHTBRACE),
+	KEY(7, 3, KEY_ENTER),
+	KEY(7, 7, KEY_MENU),
+
+	KEY(8, 4, KEY_RIGHTSHIFT),
+	KEY(8, 5, KEY_LEFTSHIFT),
+
+	KEY(9, 5, KEY_RIGHTCTRL),
+	KEY(9, 7, KEY_LEFTCTRL),
+
+	KEY(11, 0, KEY_LEFTBRACE),
+	KEY(11, 1, KEY_P),
+	KEY(11, 2, KEY_APOSTROPHE),
+	KEY(11, 3, KEY_SEMICOLON),
+	KEY(11, 4, KEY_SLASH),
+	KEY(11, 5, KEY_DOT),
+
+	KEY(12, 0, KEY_F10),
+	KEY(12, 1, KEY_F9),
+	KEY(12, 2, KEY_BACKSPACE),
+	KEY(12, 3, KEY_3),
+	KEY(12, 4, KEY_2),
+	KEY(12, 5, KEY_UP),
+	KEY(12, 6, KEY_PRINT),
+	KEY(12, 7, KEY_PAUSE),
+
+	KEY(13, 0, KEY_INSERT),
+	KEY(13, 1, KEY_DELETE),
+	KEY(13, 3, KEY_PAGEUP),
+	KEY(13, 4, KEY_PAGEDOWN),
+	KEY(13, 5, KEY_RIGHT),
+	KEY(13, 6, KEY_DOWN),
+	KEY(13, 7, KEY_LEFT),
+
+	KEY(14, 0, KEY_F11),
+	KEY(14, 1, KEY_F12),
+	KEY(14, 2, KEY_F8),
+	KEY(14, 3, KEY_Q),
+	KEY(14, 4, KEY_F4),
+	KEY(14, 5, KEY_F3),
+	KEY(14, 6, KEY_1),
+	KEY(14, 7, KEY_F7),
+
+	KEY(15, 0, KEY_ESC),
+	KEY(15, 1, KEY_GRAVE),
+	KEY(15, 2, KEY_F5),
+	KEY(15, 3, KEY_TAB),
+	KEY(15, 4, KEY_F1),
+	KEY(15, 5, KEY_F2),
+	KEY(15, 6, KEY_CAPSLOCK),
+	KEY(15, 7, KEY_F6),
+};
+
+static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
+	.keymap		= tegra_kbc_default_keymap,
+	.keymap_size	= ARRAY_SIZE(tegra_kbc_default_keymap),
+};
+
+static void tegra_kbc_report_released_keys(struct input_dev *input,
+					   unsigned short old_keycodes[],
+					   unsigned int old_num_keys,
+					   unsigned short new_keycodes[],
+					   unsigned int new_num_keys)
+{
+	unsigned int i, j;
+
+	for (i = 0; i < old_num_keys; i++) {
+		for (j = 0; j < new_num_keys; j++)
+			if (old_keycodes[i] == new_keycodes[j])
+				break;
+
+		if (j == new_num_keys)
+			input_report_key(input, old_keycodes[i], 0);
+	}
+}
+
+static void tegra_kbc_report_pressed_keys(struct input_dev *input,
+					  unsigned char scancodes[],
+					  unsigned short keycodes[],
+					  unsigned int num_pressed_keys)
+{
+	unsigned int i;
+
+	for (i = 0; i < num_pressed_keys; i++) {
+		input_event(input, EV_MSC, MSC_SCAN, scancodes[i]);
+		input_report_key(input, keycodes[i], 1);
+	}
+}
+
+static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
+{
+	unsigned char scancodes[KBC_MAX_KPENT];
+	unsigned short keycodes[KBC_MAX_KPENT];
+	u32 val = 0;
+	unsigned int i;
+	unsigned int num_down = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&kbc->lock, flags);
+	for (i = 0; i < KBC_MAX_KPENT; i++) {
+		if ((i % 4) == 0)
+			val = readl(kbc->mmio + KBC_KP_ENT0_0 + i);
+
+		if (val & 0x80) {
+			unsigned int col = val & 0x07;
+			unsigned int row = (val >> 3) & 0x0f;
+			unsigned char scancode =
+				MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT);
+
+			scancodes[num_down] = scancode;
+			keycodes[num_down++] = kbc->keycode[scancode];
+		}
+
+		val >>= 8;
+	}
+	spin_unlock_irqrestore(&kbc->lock, flags);
+
+	tegra_kbc_report_released_keys(kbc->idev,
+				       kbc->current_keys, kbc->num_pressed_keys,
+				       keycodes, num_down);
+	tegra_kbc_report_pressed_keys(kbc->idev, scancodes, keycodes, num_down);
+	input_sync(kbc->idev);
+
+	memcpy(kbc->current_keys, keycodes, sizeof(kbc->current_keys));
+	kbc->num_pressed_keys = num_down;
+}
+
+static void tegra_kbc_keypress_timer(unsigned long data)
+{
+	struct tegra_kbc *kbc = (struct tegra_kbc *)data;
+	unsigned long flags;
+	u32 val;
+	unsigned int i;
+
+	val = (readl(kbc->mmio + KBC_INT_0) >> 4) & 0xf;
+	if (val) {
+		unsigned long dly;
+
+		tegra_kbc_report_keys(kbc);
+
+		/*
+		 * If more than one key is pressed we need not wait
+		 * for the repoll delay.
+		 */
+		dly = (val == 1) ? kbc->repoll_dly : 1;
+		mod_timer(&kbc->timer, jiffies + msecs_to_jiffies(dly));
+	} else {
+		/* Release any pressed keys and exit the polling loop */
+		for (i = 0; i < kbc->num_pressed_keys; i++)
+			input_report_key(kbc->idev, kbc->current_keys[i], 0);
+		input_sync(kbc->idev);
+
+		kbc->num_pressed_keys = 0;
+
+		/* All keys are released so enable the keypress interrupt */
+		spin_lock_irqsave(&kbc->lock, flags);
+		val = readl(kbc->mmio + KBC_CONTROL_0);
+		val |= KBC_CONTROL_FIFO_CNT_INT_EN;
+		writel(val, kbc->mmio + KBC_CONTROL_0);
+		spin_unlock_irqrestore(&kbc->lock, flags);
+	}
+}
+
+static irqreturn_t tegra_kbc_isr(int irq, void *args)
+{
+	struct tegra_kbc *kbc = args;
+	u32 val, ctl;
+
+	/*
+	 * Until all keys are released, defer further processing to
+	 * the polling loop in tegra_kbc_keypress_timer
+	 */
+	ctl = readl(kbc->mmio + KBC_CONTROL_0);
+	ctl &= ~KBC_CONTROL_FIFO_CNT_INT_EN;
+	writel(ctl, kbc->mmio + KBC_CONTROL_0);
+
+	/*
+	 * Quickly bail out & reenable interrupts if the fifo threshold
+	 * count interrupt wasn't the interrupt source
+	 */
+	val = readl(kbc->mmio + KBC_INT_0);
+	writel(val, kbc->mmio + KBC_INT_0);
+
+	if (val & KBC_INT_FIFO_CNT_INT_STATUS) {
+		/*
+		 * Schedule timer to run when hardware is in continuous
+		 * polling mode.
+		 */
+		mod_timer(&kbc->timer, jiffies + kbc->cp_dly_jiffies);
+	} else {
+		ctl |= KBC_CONTROL_FIFO_CNT_INT_EN;
+		writel(ctl, kbc->mmio + KBC_CONTROL_0);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void tegra_kbc_setup_wakekeys(struct tegra_kbc *kbc, bool filter)
+{
+	const struct tegra_kbc_platform_data *pdata = kbc->pdata;
+	int i;
+	unsigned int rst_val;
+
+	BUG_ON(pdata->wake_cnt > KBC_MAX_KEY);
+	rst_val = (filter && pdata->wake_cnt) ? ~0 : 0;
+
+	for (i = 0; i < KBC_MAX_ROW; i++)
+		writel(rst_val, kbc->mmio + KBC_ROW0_MASK_0 + i * 4);
+
+	if (filter) {
+		for (i = 0; i < pdata->wake_cnt; i++) {
+			u32 val, addr;
+			addr = pdata->wake_cfg[i].row * 4 + KBC_ROW0_MASK_0;
+			val = readl(kbc->mmio + addr);
+			val &= ~(1 << pdata->wake_cfg[i].col);
+			writel(val, kbc->mmio + addr);
+		}
+	}
+}
+
+static void tegra_kbc_config_pins(struct tegra_kbc *kbc)
+{
+	const struct tegra_kbc_platform_data *pdata = kbc->pdata;
+	int i;
+
+	for (i = 0; i < KBC_MAX_GPIO; i++) {
+		u32 r_shft = 5 * (i % 6);
+		u32 c_shft = 4 * (i % 8);
+		u32 r_mask = 0x1f << r_shft;
+		u32 c_mask = 0x0f << c_shft;
+		u32 r_offs = (i / 6) * 4 + KBC_ROW_CFG0_0;
+		u32 c_offs = (i / 8) * 4 + KBC_COL_CFG0_0;
+		u32 row_cfg = readl(kbc->mmio + r_offs);
+		u32 col_cfg = readl(kbc->mmio + c_offs);
+
+		row_cfg &= ~r_mask;
+		col_cfg &= ~c_mask;
+
+		if (pdata->pin_cfg[i].is_row)
+			row_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << r_shft;
+		else
+			col_cfg |= ((pdata->pin_cfg[i].num << 1) | 1) << c_shft;
+
+		writel(row_cfg, kbc->mmio + r_offs);
+		writel(col_cfg, kbc->mmio + c_offs);
+	}
+}
+
+static int tegra_kbc_start(struct tegra_kbc *kbc)
+{
+	const struct tegra_kbc_platform_data *pdata = kbc->pdata;
+	unsigned long flags;
+	unsigned int debounce_cnt;
+	u32 val = 0;
+
+	clk_enable(kbc->clk);
+
+	/* Reset the KBC controller to clear all previous status. */
+	tegra_periph_reset_assert(kbc->clk);
+	udelay(100);
+	tegra_periph_reset_deassert(kbc->clk);
+	udelay(100);
+
+	tegra_kbc_config_pins(kbc);
+	tegra_kbc_setup_wakekeys(kbc, false);
+
+	writel(pdata->repeat_cnt, kbc->mmio + KBC_RPT_DLY_0);
+
+	/* Keyboard debounce count is maximum of 12 bits. */
+	debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
+	val = KBC_DEBOUNCE_CNT_SHIFT(debounce_cnt);
+	val |= KBC_FIFO_TH_CNT_SHIFT(1); /* set fifo interrupt threshold to 1 */
+	val |= KBC_CONTROL_FIFO_CNT_INT_EN;  /* interrupt on FIFO threshold */
+	val |= KBC_CONTROL_KBC_EN;     /* enable */
+	writel(val, kbc->mmio + KBC_CONTROL_0);
+
+	/*
+	 * Compute the delay (in microseconds) from interrupt mode to
+	 * continuous polling mode so the timer routine is scheduled
+	 * appropriately.
+	 */
+	val = readl(kbc->mmio + KBC_INIT_DLY_0);
+	kbc->cp_dly_jiffies = usecs_to_jiffies((val & 0xfffff) * 32);
+
+	kbc->num_pressed_keys = 0;
+
+	/*
+	 * Atomically clear out any remaining entries in the key FIFO
+	 * and enable keyboard interrupts.
+	 */
+	spin_lock_irqsave(&kbc->lock, flags);
+	while (1) {
+		val = readl(kbc->mmio + KBC_INT_0);
+		val >>= 4;
+		if (!val)
+			break;
+
+		val = readl(kbc->mmio + KBC_KP_ENT0_0);
+		val = readl(kbc->mmio + KBC_KP_ENT1_0);
+	}
+	writel(0x7, kbc->mmio + KBC_INT_0);
+	spin_unlock_irqrestore(&kbc->lock, flags);
+
+	enable_irq(kbc->irq);
+
+	return 0;
+}
+
+static void tegra_kbc_stop(struct tegra_kbc *kbc)
+{
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&kbc->lock, flags);
+	val = readl(kbc->mmio + KBC_CONTROL_0);
+	val &= ~1;
+	writel(val, kbc->mmio + KBC_CONTROL_0);
+	spin_unlock_irqrestore(&kbc->lock, flags);
+
+	disable_irq(kbc->irq);
+	del_timer_sync(&kbc->timer);
+
+	clk_disable(kbc->clk);
+}
+
+static int tegra_kbc_open(struct input_dev *dev)
+{
+	struct tegra_kbc *kbc = input_get_drvdata(dev);
+
+	return tegra_kbc_start(kbc);
+}
+
+static void tegra_kbc_close(struct input_dev *dev)
+{
+	struct tegra_kbc *kbc = input_get_drvdata(dev);
+
+	return tegra_kbc_stop(kbc);
+}
+
+static bool __devinit
+tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
+			struct device *dev, unsigned int *num_rows)
+{
+	int i;
+
+	*num_rows = 0;
+
+	for (i = 0; i < KBC_MAX_GPIO; i++) {
+		const struct tegra_kbc_pin_cfg *pin_cfg = &pdata->pin_cfg[i];
+
+		if (pin_cfg->is_row) {
+			if (pin_cfg->num >= KBC_MAX_ROW) {
+				dev_err(dev,
+					"pin_cfg[%d]: invalid row number %d\n",
+					i, pin_cfg->num);
+				return false;
+			}
+			(*num_rows)++;
+		} else {
+			if (pin_cfg->num >= KBC_MAX_COL) {
+				dev_err(dev,
+					"pin_cfg[%d]: invalid column number %d\n",
+					i, pin_cfg->num);
+				return false;
+			}
+		}
+	}
+
+	return true;
+}
+
+static int __devinit tegra_kbc_probe(struct platform_device *pdev)
+{
+	const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
+	const struct matrix_keymap_data *keymap_data;
+	struct tegra_kbc *kbc;
+	struct input_dev *input_dev;
+	struct resource *res;
+	int irq;
+	int err;
+	int i;
+	int num_rows = 0;
+	unsigned int debounce_cnt;
+	unsigned int scan_time_rows;
+
+	if (!pdata)
+		return -EINVAL;
+
+	if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows))
+		return -EINVAL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "failed to get I/O memory\n");
+		return -ENXIO;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to get keyboard IRQ\n");
+		return -ENXIO;
+	}
+
+	kbc = kzalloc(sizeof(*kbc), GFP_KERNEL);
+	input_dev = input_allocate_device();
+	if (!kbc || !input_dev) {
+		err = -ENOMEM;
+		goto err_free_mem;
+	}
+
+	kbc->pdata = pdata;
+	kbc->idev = input_dev;
+	kbc->irq = irq;
+	spin_lock_init(&kbc->lock);
+	setup_timer(&kbc->timer, tegra_kbc_keypress_timer, (unsigned long)kbc);
+
+	res = request_mem_region(res->start, resource_size(res), pdev->name);
+	if (!res) {
+		dev_err(&pdev->dev, "failed to request I/O memory\n");
+		err = -EBUSY;
+		goto err_free_mem;
+	}
+
+	kbc->mmio = ioremap(res->start, resource_size(res));
+	if (!kbc->mmio) {
+		dev_err(&pdev->dev, "failed to remap I/O memory\n");
+		err = -ENXIO;
+		goto err_free_mem_region;
+	}
+
+	kbc->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(kbc->clk)) {
+		dev_err(&pdev->dev, "failed to get keyboard clock\n");
+		err = PTR_ERR(kbc->clk);
+		goto err_iounmap;
+	}
+
+	kbc->wake_enable_rows = 0;
+	kbc->wake_enable_cols = 0;
+	for (i = 0; i < pdata->wake_cnt; i++) {
+		kbc->wake_enable_rows |= (1 << pdata->wake_cfg[i].row);
+		kbc->wake_enable_cols |= (1 << pdata->wake_cfg[i].col);
+	}
+
+	/*
+	 * The time delay between two consecutive reads of the FIFO is
+	 * the sum of the repeat time and the time taken for scanning
+	 * the rows. There is an additional delay before the row scanning
+	 * starts. The repoll delay is computed in milliseconds.
+	 */
+	debounce_cnt = min(pdata->debounce_cnt, KBC_MAX_DEBOUNCE_CNT);
+	scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows;
+	kbc->repoll_dly = KBC_ROW_SCAN_DLY + scan_time_rows + pdata->repeat_cnt;
+	kbc->repoll_dly = ((kbc->repoll_dly * KBC_CYCLE_USEC) + 999) / 1000;
+
+	input_dev->name = pdev->name;
+	input_dev->id.bustype = BUS_HOST;
+	input_dev->dev.parent = &pdev->dev;
+	input_dev->open = tegra_kbc_open;
+	input_dev->close = tegra_kbc_close;
+
+	input_set_drvdata(input_dev, kbc);
+
+	input_dev->evbit[0] = BIT_MASK(EV_KEY);
+	input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+
+	input_dev->keycode = kbc->keycode;
+	input_dev->keycodesize = sizeof(kbc->keycode[0]);
+	input_dev->keycodemax = ARRAY_SIZE(kbc->keycode);
+
+	keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
+	matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
+				   input_dev->keycode, input_dev->keybit);
+
+	err = request_irq(kbc->irq, tegra_kbc_isr, IRQF_TRIGGER_HIGH,
+			  pdev->name, kbc);
+	if (err) {
+		dev_err(&pdev->dev, "failed to request keyboard IRQ\n");
+		goto err_put_clk;
+	}
+
+	disable_irq(kbc->irq);
+
+	err = input_register_device(kbc->idev);
+	if (err) {
+		dev_err(&pdev->dev, "failed to register input device\n");
+		goto err_free_irq;
+	}
+
+	platform_set_drvdata(pdev, kbc);
+	device_init_wakeup(&pdev->dev, pdata->wakeup);
+
+	return 0;
+
+err_free_irq:
+	free_irq(kbc->irq, pdev);
+err_put_clk:
+	clk_put(kbc->clk);
+err_iounmap:
+	iounmap(kbc->mmio);
+err_free_mem_region:
+	release_mem_region(res->start, resource_size(res));
+err_free_mem:
+	input_free_device(kbc->idev);
+	kfree(kbc);
+
+	return err;
+}
+
+static int __devexit tegra_kbc_remove(struct platform_device *pdev)
+{
+	struct tegra_kbc *kbc = platform_get_drvdata(pdev);
+	struct resource *res;
+
+	free_irq(kbc->irq, pdev);
+	clk_put(kbc->clk);
+
+	input_unregister_device(kbc->idev);
+	iounmap(kbc->mmio);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+
+	kfree(kbc);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_kbc_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct tegra_kbc *kbc = platform_get_drvdata(pdev);
+
+	if (device_may_wakeup(&pdev->dev)) {
+		tegra_kbc_setup_wakekeys(kbc, true);
+		enable_irq_wake(kbc->irq);
+		/* Forcefully clear the interrupt status */
+		writel(0x7, kbc->mmio + KBC_INT_0);
+		msleep(30);
+	} else {
+		mutex_lock(&kbc->idev->mutex);
+		if (kbc->idev->users)
+			tegra_kbc_stop(kbc);
+		mutex_unlock(&kbc->idev->mutex);
+	}
+
+	return 0;
+}
+
+static int tegra_kbc_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct tegra_kbc *kbc = platform_get_drvdata(pdev);
+	int err = 0;
+
+	if (device_may_wakeup(&pdev->dev)) {
+		disable_irq_wake(kbc->irq);
+		tegra_kbc_setup_wakekeys(kbc, false);
+	} else {
+		mutex_lock(&kbc->idev->mutex);
+		if (kbc->idev->users)
+			err = tegra_kbc_start(kbc);
+		mutex_unlock(&kbc->idev->mutex);
+	}
+
+	return err;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tegra_kbc_pm_ops, tegra_kbc_suspend, tegra_kbc_resume);
+
+static struct platform_driver tegra_kbc_driver = {
+	.probe		= tegra_kbc_probe,
+	.remove		= __devexit_p(tegra_kbc_remove),
+	.driver	= {
+		.name	= "tegra-kbc",
+		.owner  = THIS_MODULE,
+		.pm	= &tegra_kbc_pm_ops,
+	},
+};
+
+static void __exit tegra_kbc_exit(void)
+{
+	platform_driver_unregister(&tegra_kbc_driver);
+}
+module_exit(tegra_kbc_exit);
+
+static int __init tegra_kbc_init(void)
+{
+	return platform_driver_register(&tegra_kbc_driver);
+}
+module_init(tegra_kbc_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rakesh Iyer <riyer@nvidia.com>");
+MODULE_DESCRIPTION("Tegra matrix keyboard controller driver");
+MODULE_ALIAS("platform:tegra-kbc");
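Two parts of the new driver are easier to follow with the numbers written out: the FIFO entry layout decoded in tegra_kbc_report_keys() (bit 7 valid, bits 6:3 row, bits 2:0 column) and the repoll delay computed in tegra_kbc_probe(). A small stand-alone sketch using hypothetical platform data values:

#include <stdio.h>

#define KBC_ROW_SCAN_TIME	16
#define KBC_ROW_SCAN_DLY	5
#define KBC_CYCLE_USEC		32

int main(void)
{
	/* one byte of a KBC_KP_ENT register: valid bit, 4-bit row, 3-bit column */
	unsigned char ent = 0x80 | (12 << 3) | 5;	/* row 12, column 5 pressed */

	if (ent & 0x80)
		printf("row %u col %u\n", (ent >> 3) & 0x0f, ent & 0x07);

	/* repoll delay for hypothetical pdata: debounce_cnt = 2, repeat_cnt = 1024,
	 * 16 rows wired up; all counts are in 32 kHz KBC cycles */
	unsigned int debounce_cnt = 2, repeat_cnt = 1024, num_rows = 16;
	unsigned int scan_time_rows = (KBC_ROW_SCAN_TIME + debounce_cnt) * num_rows;
	unsigned int repoll_cycles = KBC_ROW_SCAN_DLY + scan_time_rows + repeat_cnt;
	unsigned int repoll_ms = (repoll_cycles * KBC_CYCLE_USEC + 999) / 1000;

	printf("repoll delay ~ %u ms\n", repoll_ms);	/* (5 + 288 + 1024) * 32 us ~= 43 ms */

	return 0;
}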
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index b4a81eb..c8f097a 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -14,6 +14,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/input.h>
 #include <linux/platform_device.h>
@@ -219,9 +220,9 @@
 	}
 
 	kp->clk = clk_get(dev, NULL);
-	if (!kp->clk) {
+	if (IS_ERR(kp->clk)) {
 		dev_err(dev, "cannot claim device clock\n");
-		error = -EINVAL;
+		error = PTR_ERR(kp->clk);
 		goto error_clk;
 	}
 
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 9dfd6e5..1f38302 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -69,11 +69,7 @@
 	}
 
 	if (value > 20 && value < 32767)
-#ifndef FREQ
-		count = (ixp4xx_get_board_tick_rate() / (value * 4)) - 1;
-#else
-		count = (FREQ / (value * 4)) - 1;
-#endif
+		count = (IXP4XX_TIMER_FREQ / (value * 4)) - 1;
 
 	ixp4xx_spkr_control(pin, count);
 
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 1f8e010..7e64d01 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -176,7 +176,7 @@
 
 	/* request the IRQs */
 	err = request_irq(encoder->irq_a, &rotary_encoder_irq,
-			  IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
+			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 			  DRV_NAME, encoder);
 	if (err) {
 		dev_err(&pdev->dev, "unable to request IRQ %d\n",
@@ -185,7 +185,7 @@
 	}
 
 	err = request_irq(encoder->irq_b, &rotary_encoder_irq,
-			  IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
+			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 			  DRV_NAME, encoder);
 	if (err) {
 		dev_err(&pdev->dev, "unable to request IRQ %d\n",
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index bf5fd7f..9c1e6ee 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -39,7 +39,7 @@
 	  module will be called psmouse.
 
 config MOUSE_PS2_ALPS
-	bool "ALPS PS/2 mouse protocol extension" if EMBEDDED
+	bool "ALPS PS/2 mouse protocol extension" if EXPERT
 	default y
 	depends on MOUSE_PS2
 	help
@@ -49,7 +49,7 @@
 	  If unsure, say Y.
 
 config MOUSE_PS2_LOGIPS2PP
-	bool "Logitech PS/2++ mouse protocol extension" if EMBEDDED
+	bool "Logitech PS/2++ mouse protocol extension" if EXPERT
 	default y
 	depends on MOUSE_PS2
 	help
@@ -59,7 +59,7 @@
 	  If unsure, say Y.
 
 config MOUSE_PS2_SYNAPTICS
-	bool "Synaptics PS/2 mouse protocol extension" if EMBEDDED
+	bool "Synaptics PS/2 mouse protocol extension" if EXPERT
 	default y
 	depends on MOUSE_PS2
 	help
@@ -69,7 +69,7 @@
 	  If unsure, say Y.
 
 config MOUSE_PS2_LIFEBOOK
-	bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EMBEDDED
+	bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EXPERT
 	default y
 	depends on MOUSE_PS2 && X86 && DMI
 	help
@@ -79,7 +79,7 @@
 	  If unsure, say Y.
 
 config MOUSE_PS2_TRACKPOINT
-	bool "IBM Trackpoint PS/2 mouse protocol extension" if EMBEDDED
+	bool "IBM Trackpoint PS/2 mouse protocol extension" if EXPERT
 	default y
 	depends on MOUSE_PS2
 	help
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index da392c2..aa186cf 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -755,23 +755,26 @@
 {
 	struct synaptics_data *priv = psmouse->private;
 	struct synaptics_data old_priv = *priv;
+	int retry = 0;
+	int error;
 
-	psmouse_reset(psmouse);
+	do {
+		psmouse_reset(psmouse);
+		error = synaptics_detect(psmouse, 0);
+	} while (error && ++retry < 3);
 
-	if (synaptics_detect(psmouse, 0))
+	if (error)
 		return -1;
 
+	if (retry > 1)
+		printk(KERN_DEBUG "Synaptics reconnected after %d tries\n",
+			retry);
+
 	if (synaptics_query_hardware(psmouse)) {
 		printk(KERN_ERR "Unable to query Synaptics hardware.\n");
 		return -1;
 	}
 
-	if (old_priv.identity != priv->identity ||
-	    old_priv.model_id != priv->model_id ||
-	    old_priv.capabilities != priv->capabilities ||
-	    old_priv.ext_cap != priv->ext_cap)
-		return -1;
-
 	if (synaptics_set_absolute_mode(psmouse)) {
 		printk(KERN_ERR "Unable to initialize Synaptics hardware.\n");
 		return -1;
@@ -782,6 +785,19 @@
 		return -1;
 	}
 
+	if (old_priv.identity != priv->identity ||
+	    old_priv.model_id != priv->model_id ||
+	    old_priv.capabilities != priv->capabilities ||
+	    old_priv.ext_cap != priv->ext_cap) {
+		printk(KERN_ERR "Synaptics hardware appears to be different: "
+			"id(%ld-%ld), model(%ld-%ld), caps(%lx-%lx), ext(%lx-%lx).\n",
+			old_priv.identity, priv->identity,
+			old_priv.model_id, priv->model_id,
+			old_priv.capabilities, priv->capabilities,
+			old_priv.ext_cap, priv->ext_cap);
+		return -1;
+	}
+
 	return 0;
 }
 
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 307eef7..55f2c22 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -2,7 +2,7 @@
 # Input core configuration
 #
 config SERIO
-	tristate "Serial I/O support" if EMBEDDED || !X86
+	tristate "Serial I/O support" if EXPERT || !X86
 	default y
 	help
 	  Say Yes here if you have any input device that uses serial I/O to
@@ -19,7 +19,7 @@
 if SERIO
 
 config SERIO_I8042
-	tristate "i8042 PC Keyboard controller" if EMBEDDED || !X86
+	tristate "i8042 PC Keyboard controller" if EXPERT || !X86
 	default y
 	depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
 		   (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN
@@ -168,7 +168,7 @@
 	  module will be called maceps2.
 
 config SERIO_LIBPS2
-	tristate "PS/2 driver library" if EMBEDDED
+	tristate "PS/2 driver library" if EXPERT
 	depends on SERIO_I8042 || SERIO_I8042=n
 	help
 	  Say Y here if you are using a driver for device connected
diff --git a/drivers/input/serio/ct82c710.c b/drivers/input/serio/ct82c710.c
index 448c772..8528165 100644
--- a/drivers/input/serio/ct82c710.c
+++ b/drivers/input/serio/ct82c710.c
@@ -111,9 +111,11 @@
 static int ct82c710_open(struct serio *serio)
 {
 	unsigned char status;
+	int err;
 
-	if (request_irq(CT82C710_IRQ, ct82c710_interrupt, 0, "ct82c710", NULL))
-		return -1;
+	err = request_irq(CT82C710_IRQ, ct82c710_interrupt, 0, "ct82c710", NULL);
+	if (err)
+		return err;
 
 	status = inb_p(CT82C710_STATUS);
 
@@ -131,7 +133,7 @@
 		status &= ~(CT82C710_ENABLE | CT82C710_INTS_ON);
 		outb_p(status, CT82C710_STATUS);
 		free_irq(CT82C710_IRQ, NULL);
-		return -1;
+		return -EBUSY;
 	}
 
 	return 0;
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index db5b0bc..7c38d1f 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -188,7 +188,8 @@
 	kfree(event);
 }
 
-static void serio_remove_duplicate_events(struct serio_event *event)
+static void serio_remove_duplicate_events(void *object,
+					  enum serio_event_type type)
 {
 	struct serio_event *e, *next;
 	unsigned long flags;
@@ -196,13 +197,13 @@
 	spin_lock_irqsave(&serio_event_lock, flags);
 
 	list_for_each_entry_safe(e, next, &serio_event_list, node) {
-		if (event->object == e->object) {
+		if (object == e->object) {
 			/*
 			 * If this event is of different type we should not
 			 * look further - we only suppress duplicate events
 			 * that were sent back-to-back.
 			 */
-			if (event->type != e->type)
+			if (type != e->type)
 				break;
 
 			list_del_init(&e->node);
@@ -245,7 +246,7 @@
 			break;
 		}
 
-		serio_remove_duplicate_events(event);
+		serio_remove_duplicate_events(event->object, event->type);
 		serio_free_event(event);
 	}
 
@@ -436,10 +437,12 @@
 	} else if (!strncmp(buf, "rescan", count)) {
 		serio_disconnect_port(serio);
 		serio_find_driver(serio);
+		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
 	} else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
 		serio_disconnect_port(serio);
 		error = serio_bind_driver(serio, to_serio_driver(drv));
 		put_driver(drv);
+		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
 	} else {
 		error = -EINVAL;
 	}
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 6e362de..8755f5f 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -116,14 +116,15 @@
 
 /*
  * serport_ldisc_receive() is called by the low level tty driver when characters
- * are ready for us. We forward the characters, one by one to the 'interrupt'
- * routine.
+ * are ready for us. We forward the characters and flags, one by one to the
+ * 'interrupt' routine.
  */
 
 static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
 {
 	struct serport *serport = (struct serport*) tty->disc_data;
 	unsigned long flags;
+	unsigned int ch_flags;
 	int i;
 
 	spin_lock_irqsave(&serport->lock, flags);
@@ -131,8 +132,23 @@
 	if (!test_bit(SERPORT_ACTIVE, &serport->flags))
 		goto out;
 
-	for (i = 0; i < count; i++)
-		serio_interrupt(serport->serio, cp[i], 0);
+	for (i = 0; i < count; i++) {
+		switch (fp[i]) {
+		case TTY_FRAME:
+			ch_flags = SERIO_FRAME;
+			break;
+
+		case TTY_PARITY:
+			ch_flags = SERIO_PARITY;
+			break;
+
+		default:
+			ch_flags = 0;
+			break;
+		}
+
+		serio_interrupt(serport->serio, cp[i], ch_flags);
+	}
 
 out:
 	spin_unlock_irqrestore(&serport->lock, flags);
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index a29a7812..7729e54 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -201,6 +201,7 @@
 			break;
 
 		case KE_SW:
+		case KE_VSW:
 			__set_bit(EV_SW, dev->evbit);
 			__set_bit(entry->sw.code, dev->swbit);
 			break;
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index fc38149..cf8fb9f 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -519,7 +519,7 @@
 	/* Retrieve the physical and logical size for OEM devices */
 	error = wacom_retrieve_hid_descriptor(intf, features);
 	if (error)
-		goto fail2;
+		goto fail3;
 
 	wacom_setup_device_quirks(features);
 
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 5187829..367fa82 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1101,6 +1101,13 @@
 	}
 }
 
+static unsigned int wacom_calculate_touch_res(unsigned int logical_max,
+					      unsigned int physical_max)
+{
+       /* Touch physical dimensions are in 100ths of a mm */
+       return (logical_max * 100) / physical_max;
+}
+
 void wacom_setup_input_capabilities(struct input_dev *input_dev,
 				    struct wacom_wac *wacom_wac)
 {
@@ -1228,8 +1235,12 @@
 	case TABLETPC:
 		if (features->device_type == BTN_TOOL_DOUBLETAP ||
 		    features->device_type == BTN_TOOL_TRIPLETAP) {
-			input_set_abs_params(input_dev, ABS_RX, 0, features->x_phy, 0, 0);
-			input_set_abs_params(input_dev, ABS_RY, 0, features->y_phy, 0, 0);
+			input_abs_set_res(input_dev, ABS_X,
+				wacom_calculate_touch_res(features->x_max,
+							features->x_phy));
+			input_abs_set_res(input_dev, ABS_Y,
+				wacom_calculate_touch_res(features->y_max,
+							features->y_phy));
 			__set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
 		}
 
@@ -1272,6 +1283,12 @@
 			input_set_abs_params(input_dev, ABS_MT_PRESSURE,
 					     0, features->pressure_max,
 					     features->pressure_fuzz, 0);
+			input_abs_set_res(input_dev, ABS_X,
+				wacom_calculate_touch_res(features->x_max,
+							features->x_phy));
+			input_abs_set_res(input_dev, ABS_Y,
+				wacom_calculate_touch_res(features->y_max,
+							features->y_phy));
 		} else if (features->device_type == BTN_TOOL_PEN) {
 			__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
 			__set_bit(BTN_TOOL_PEN, input_dev->keybit);
@@ -1426,6 +1443,10 @@
 	{ "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN,     21648, 13530, 1023, 63, BAMBOO_PT };
 static const struct wacom_features wacom_features_0xD4 =
 	{ "Wacom Bamboo Pen",     WACOM_PKGLEN_BBFUN,     14720,  9200,  255, 63, BAMBOO_PT };
+static struct wacom_features wacom_features_0xD6 =
+	{ "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN,   14720,  9200, 1023, 63, BAMBOO_PT };
+static struct wacom_features wacom_features_0xD7 =
+	{ "Wacom BambooPT 2FG Small", WACOM_PKGLEN_BBFUN, 14720,  9200, 1023, 63, BAMBOO_PT };
 static struct wacom_features wacom_features_0xD8 =
 	{ "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN,   21648, 13530, 1023, 63, BAMBOO_PT };
 static struct wacom_features wacom_features_0xDA =
@@ -1507,6 +1528,8 @@
 	{ USB_DEVICE_WACOM(0xD2) },
 	{ USB_DEVICE_WACOM(0xD3) },
 	{ USB_DEVICE_WACOM(0xD4) },
+	{ USB_DEVICE_WACOM(0xD6) },
+	{ USB_DEVICE_WACOM(0xD7) },
 	{ USB_DEVICE_WACOM(0xD8) },
 	{ USB_DEVICE_WACOM(0xDA) },
 	{ USB_DEVICE_WACOM(0xDB) },
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 0c9f4b1..61834ae 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -540,62 +540,62 @@
 
 config TOUCHSCREEN_USB_EGALAX
 	default y
-	bool "eGalax, eTurboTouch CT-410/510/700 device support" if EMBEDDED
+	bool "eGalax, eTurboTouch CT-410/510/700 device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_PANJIT
 	default y
-	bool "PanJit device support" if EMBEDDED
+	bool "PanJit device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_3M
 	default y
-	bool "3M/Microtouch EX II series device support" if EMBEDDED
+	bool "3M/Microtouch EX II series device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_ITM
 	default y
-	bool "ITM device support" if EMBEDDED
+	bool "ITM device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_ETURBO
 	default y
-	bool "eTurboTouch (non-eGalax compatible) device support" if EMBEDDED
+	bool "eTurboTouch (non-eGalax compatible) device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_GUNZE
 	default y
-	bool "Gunze AHL61 device support" if EMBEDDED
+	bool "Gunze AHL61 device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_DMC_TSC10
 	default y
-	bool "DMC TSC-10/25 device support" if EMBEDDED
+	bool "DMC TSC-10/25 device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_IRTOUCH
 	default y
-	bool "IRTOUCHSYSTEMS/UNITOP device support" if EMBEDDED
+	bool "IRTOUCHSYSTEMS/UNITOP device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_IDEALTEK
 	default y
-	bool "IdealTEK URTC1000 device support" if EMBEDDED
+	bool "IdealTEK URTC1000 device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_GENERAL_TOUCH
 	default y
-	bool "GeneralTouch Touchscreen device support" if EMBEDDED
+	bool "GeneralTouch Touchscreen device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_GOTOP
 	default y
-	bool "GoTop Super_Q2/GogoPen/PenPower tablet device support" if EMBEDDED
+	bool "GoTop Super_Q2/GogoPen/PenPower tablet device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_JASTEC
 	default y
-	bool "JASTEC/DigiTech DTR-02U USB touch controller device support" if EMBEDDED
+	bool "JASTEC/DigiTech DTR-02U USB touch controller device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_E2I
@@ -605,17 +605,17 @@
 
 config TOUCHSCREEN_USB_ZYTRONIC
 	default y
-	bool "Zytronic controller" if EMBEDDED
+	bool "Zytronic controller" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_ETT_TC45USB
 	default y
-	bool "ET&T USB series TC4UM/TC5UH touchscreen controller support" if EMBEDDED
+	bool "ET&T USB series TC4UM/TC5UH touchscreen controller support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_NEXIO
 	default y
-	bool "NEXIO/iNexio device support" if EMBEDDED
+	bool "NEXIO/iNexio device support" if EXPERT
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_TOUCHIT213
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 14ea54b..4bf2316 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -941,28 +941,29 @@
 	struct ads7846_platform_data *pdata = spi->dev.platform_data;
 	int err;
 
-	/* REVISIT when the irq can be triggered active-low, or if for some
+	/*
+	 * REVISIT when the irq can be triggered active-low, or if for some
 	 * reason the touchscreen isn't hooked up, we don't need to access
 	 * the pendown state.
 	 */
-	if (!pdata->get_pendown_state && !gpio_is_valid(pdata->gpio_pendown)) {
-		dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
-		return -EINVAL;
-	}
 
 	if (pdata->get_pendown_state) {
 		ts->get_pendown_state = pdata->get_pendown_state;
-		return 0;
-	}
+	} else if (gpio_is_valid(pdata->gpio_pendown)) {
 
-	err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
-	if (err) {
-		dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
-			pdata->gpio_pendown);
-		return err;
-	}
+		err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
+		if (err) {
+			dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
+				pdata->gpio_pendown);
+			return err;
+		}
 
-	ts->gpio_pendown = pdata->gpio_pendown;
+		ts->gpio_pendown = pdata->gpio_pendown;
+
+	} else {
+		dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
+		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -1353,7 +1354,7 @@
  err_put_regulator:
 	regulator_put(ts->reg);
  err_free_gpio:
-	if (ts->gpio_pendown != -1)
+	if (!ts->get_pendown_state)
 		gpio_free(ts->gpio_pendown);
  err_cleanup_filter:
 	if (ts->filter_cleanup)
@@ -1383,8 +1384,13 @@
 	regulator_disable(ts->reg);
 	regulator_put(ts->reg);
 
-	if (ts->gpio_pendown != -1)
+	if (!ts->get_pendown_state) {
+		/*
+		 * If we are not using a specialized pendown method we must
+		 * have been relying on the GPIO we set up ourselves.
+		 */
 		gpio_free(ts->gpio_pendown);
+	}
 
 	if (ts->filter_cleanup)
 		ts->filter_cleanup(ts->filter_data);
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index f7fa9ef..1507ce1 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -12,6 +12,7 @@
 #include <linux/input.h>
 #include <linux/input/bu21013.h>
 #include <linux/slab.h>
+#include <linux/regulator/consumer.h>
 
 #define PEN_DOWN_INTR	0
 #define MAX_FINGERS	2
@@ -139,6 +140,7 @@
  * @chip: pointer to the touch panel controller
  * @in_dev: pointer to the input device structure
  * @intr_pin: interrupt pin value
+ * @regulator: pointer to the regulator used by the touchscreen
  *
  * Touch panel device data structure
  */
@@ -149,6 +151,7 @@
 	const struct bu21013_platform_device *chip;
 	struct input_dev *in_dev;
 	unsigned int intr_pin;
+	struct regulator *regulator;
 };
 
 /**
@@ -456,6 +459,20 @@
 	bu21013_data->in_dev = in_dev;
 	bu21013_data->chip = pdata;
 	bu21013_data->client = client;
+
+	bu21013_data->regulator = regulator_get(&client->dev, "V-TOUCH");
+	if (IS_ERR(bu21013_data->regulator)) {
+		dev_err(&client->dev, "regulator_get failed\n");
+		error = PTR_ERR(bu21013_data->regulator);
+		goto err_free_mem;
+	}
+
+	error = regulator_enable(bu21013_data->regulator);
+	if (error < 0) {
+		dev_err(&client->dev, "regulator enable failed\n");
+		goto err_put_regulator;
+	}
+
 	bu21013_data->touch_stopped = false;
 	init_waitqueue_head(&bu21013_data->wait);
 
@@ -464,7 +481,7 @@
 		error = pdata->cs_en(pdata->cs_pin);
 		if (error < 0) {
 			dev_err(&client->dev, "chip init failed\n");
-			goto err_free_mem;
+			goto err_disable_regulator;
 		}
 	}
 
@@ -485,9 +502,9 @@
 	__set_bit(EV_ABS, in_dev->evbit);
 
 	input_set_abs_params(in_dev, ABS_MT_POSITION_X, 0,
-						pdata->x_max_res, 0, 0);
+						pdata->touch_x_max, 0, 0);
 	input_set_abs_params(in_dev, ABS_MT_POSITION_Y, 0,
-						pdata->y_max_res, 0, 0);
+						pdata->touch_y_max, 0, 0);
 	input_set_drvdata(in_dev, bu21013_data);
 
 	error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
@@ -513,6 +530,10 @@
 	bu21013_free_irq(bu21013_data);
 err_cs_disable:
 	pdata->cs_dis(pdata->cs_pin);
+err_disable_regulator:
+	regulator_disable(bu21013_data->regulator);
+err_put_regulator:
+	regulator_put(bu21013_data->regulator);
 err_free_mem:
 	input_free_device(in_dev);
 	kfree(bu21013_data);
@@ -535,6 +556,10 @@
 	bu21013_data->chip->cs_dis(bu21013_data->chip->cs_pin);
 
 	input_unregister_device(bu21013_data->in_dev);
+
+	regulator_disable(bu21013_data->regulator);
+	regulator_put(bu21013_data->regulator);
+
 	kfree(bu21013_data);
 
 	device_init_wakeup(&client->dev, false);
@@ -561,6 +586,8 @@
 	else
 		disable_irq(bu21013_data->chip->irq);
 
+	regulator_disable(bu21013_data->regulator);
+
 	return 0;
 }
 
@@ -577,6 +604,12 @@
 	struct i2c_client *client = bu21013_data->client;
 	int retval;
 
+	retval = regulator_enable(bu21013_data->regulator);
+	if (retval < 0) {
+		dev_err(&client->dev, "bu21013 regulator enable failed\n");
+		return retval;
+	}
+
 	retval = bu21013_init_chip(bu21013_data);
 	if (retval < 0) {
 		dev_err(&client->dev, "bu21013 controller config failed\n");
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index cf1dba2..22a3411 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -14,6 +14,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/input.h>
 #include <linux/platform_device.h>
@@ -289,9 +290,9 @@
 	}
 
 	ts->clk = clk_get(dev, NULL);
-	if (!ts->clk) {
+	if (IS_ERR(ts->clk)) {
 		dev_err(dev, "cannot claim device clock\n");
-		error = -EINVAL;
+		error = PTR_ERR(ts->clk);
 		goto error_clk;
 	}
 
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 5cb8449..c14412e 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -51,6 +51,10 @@
 #define W8001_PKTLEN_TPCCTL	11	/* control packet */
 #define W8001_PKTLEN_TOUCH2FG	13
 
+/* resolution in points/mm */
+#define W8001_PEN_RESOLUTION    100
+#define W8001_TOUCH_RESOLUTION  10
+
 struct w8001_coord {
 	u8 rdy;
 	u8 tsw;
@@ -198,7 +202,7 @@
 		query->y = 1024;
 		if (query->panel_res)
 			query->x = query->y = (1 << query->panel_res);
-		query->panel_res = 10;
+		query->panel_res = W8001_TOUCH_RESOLUTION;
 	}
 }
 
@@ -394,6 +398,8 @@
 
 		input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0);
 		input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0);
+		input_abs_set_res(dev, ABS_X, W8001_PEN_RESOLUTION);
+		input_abs_set_res(dev, ABS_Y, W8001_PEN_RESOLUTION);
 		input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0);
 		if (coord.tilt_x && coord.tilt_y) {
 			input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0);
@@ -418,14 +424,17 @@
 		w8001->max_touch_x = touch.x;
 		w8001->max_touch_y = touch.y;
 
-		/* scale to pen maximum */
 		if (w8001->max_pen_x && w8001->max_pen_y) {
+			/* if pen is supported scale to pen maximum */
 			touch.x = w8001->max_pen_x;
 			touch.y = w8001->max_pen_y;
+			touch.panel_res = W8001_PEN_RESOLUTION;
 		}
 
 		input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0);
 		input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0);
+		input_abs_set_res(dev, ABS_X, touch.panel_res);
+		input_abs_set_res(dev, ABS_Y, touch.panel_res);
 
 		switch (touch.sensor_id) {
 		case 0:
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index 0858791..cfff0c4 100644
--- a/drivers/isdn/hisax/isdnl2.c
+++ b/drivers/isdn/hisax/isdnl2.c
@@ -1247,10 +1247,10 @@
 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 {
 	struct PStack *st = fi->userdata;
-	struct sk_buff *skb, *oskb;
+	struct sk_buff *skb;
 	struct Layer2 *l2 = &st->l2;
 	u_char header[MAX_HEADER_LEN];
-	int i;
+	int i, hdr_space_needed;
 	int unsigned p1;
 	u_long flags;
 
@@ -1261,6 +1261,16 @@
 	if (!skb)
 		return;
 
+	hdr_space_needed = l2headersize(l2, 0);
+	if (hdr_space_needed > skb_headroom(skb)) {
+		struct sk_buff *orig_skb = skb;
+
+		skb = skb_realloc_headroom(skb, hdr_space_needed);
+		if (!skb) {
+			dev_kfree_skb(orig_skb);
+			return;
+		}
+	}
 	spin_lock_irqsave(&l2->lock, flags);
 	if(test_bit(FLG_MOD128, &l2->flag))
 		p1 = (l2->vs - l2->va) % 128;
@@ -1285,19 +1295,7 @@
 		l2->vs = (l2->vs + 1) % 8;
 	}
 	spin_unlock_irqrestore(&l2->lock, flags);
-	p1 = skb->data - skb->head;
-	if (p1 >= i)
-		memcpy(skb_push(skb, i), header, i);
-	else {
-		printk(KERN_WARNING
-		"isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
-		oskb = skb;
-		skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
-		memcpy(skb_put(skb, i), header, i);
-		skb_copy_from_linear_data(oskb,
-					  skb_put(skb, oskb->len), oskb->len);
-		dev_kfree_skb(oskb);
-	}
+	memcpy(skb_push(skb, i), header, i);
 	st->l2.l2l1(st, PH_PULL | INDICATION, skb);
 	test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
 	if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
diff --git a/drivers/isdn/hysdn/hysdn_defs.h b/drivers/isdn/hysdn/hysdn_defs.h
index 729df40..18b801a 100644
--- a/drivers/isdn/hysdn/hysdn_defs.h
+++ b/drivers/isdn/hysdn/hysdn_defs.h
@@ -227,7 +227,6 @@
 /*************************/
 /* im/exported functions */
 /*************************/
-extern char *hysdn_getrev(const char *);
 
 /* hysdn_procconf.c */
 extern int hysdn_procconf_init(void);	/* init proc config filesys */
@@ -259,7 +258,6 @@
 
 /* hysdn_net.c */
 extern unsigned int hynet_enable; 
-extern char *hysdn_net_revision;
 extern int hysdn_net_create(hysdn_card *);	/* create a new net device */
 extern int hysdn_net_release(hysdn_card *);	/* delete the device */
 extern char *hysdn_net_getname(hysdn_card *);	/* get name of net interface */
diff --git a/drivers/isdn/hysdn/hysdn_init.c b/drivers/isdn/hysdn/hysdn_init.c
index b7cc5c2..0ab42ac 100644
--- a/drivers/isdn/hysdn/hysdn_init.c
+++ b/drivers/isdn/hysdn/hysdn_init.c
@@ -36,7 +36,6 @@
 MODULE_AUTHOR("Werner Cornelius");
 MODULE_LICENSE("GPL");
 
-static char *hysdn_init_revision = "$Revision: 1.6.6.6 $";
 static int cardmax;		/* number of found cards */
 hysdn_card *card_root = NULL;	/* pointer to first card */
 static hysdn_card *card_last = NULL;	/* pointer to first card */
@@ -49,25 +48,6 @@
 /* Additionally newer versions may be activated without rebooting.          */
 /****************************************************************************/
 
-/******************************************************/
-/* extract revision number from string for log output */
-/******************************************************/
-char *
-hysdn_getrev(const char *revision)
-{
-	char *rev;
-	char *p;
-
-	if ((p = strchr(revision, ':'))) {
-		rev = p + 2;
-		p = strchr(rev, '$');
-		*--p = 0;
-	} else
-		rev = "???";
-	return rev;
-}
-
-
 /****************************************************************************/
 /* init_module is called once when the module is loaded to do all necessary */
 /* things like autodetect...                                                */
@@ -175,13 +155,9 @@
 static int __init
 hysdn_init(void)
 {
-	char tmp[50];
 	int rc;
 
-	strcpy(tmp, hysdn_init_revision);
-	printk(KERN_NOTICE "HYSDN: module Rev: %s loaded\n", hysdn_getrev(tmp));
-	strcpy(tmp, hysdn_net_revision);
-	printk(KERN_NOTICE "HYSDN: network interface Rev: %s \n", hysdn_getrev(tmp));
+	printk(KERN_NOTICE "HYSDN: module loaded\n");
 
 	rc = pci_register_driver(&hysdn_pci_driver);
 	if (rc)
diff --git a/drivers/isdn/hysdn/hysdn_net.c b/drivers/isdn/hysdn/hysdn_net.c
index feec8d8..11f2cce 100644
--- a/drivers/isdn/hysdn/hysdn_net.c
+++ b/drivers/isdn/hysdn/hysdn_net.c
@@ -26,9 +26,6 @@
 unsigned int hynet_enable = 0xffffffff; 
 module_param(hynet_enable, uint, 0);
 
-/* store the actual version for log reporting */
-char *hysdn_net_revision = "$Revision: 1.8.6.4 $";
-
 #define MAX_SKB_BUFFERS 20	/* number of buffers for keeping TX-data */
 
 /****************************************************************************/
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c
index 96b3e39..5fe83bd 100644
--- a/drivers/isdn/hysdn/hysdn_procconf.c
+++ b/drivers/isdn/hysdn/hysdn_procconf.c
@@ -23,7 +23,6 @@
 #include "hysdn_defs.h"
 
 static DEFINE_MUTEX(hysdn_conf_mutex);
-static char *hysdn_procconf_revision = "$Revision: 1.8.6.4 $";
 
 #define INFO_OUT_LEN 80		/* length of info line including lf */
 
@@ -404,7 +403,7 @@
 		card = card->next;	/* next entry */
 	}
 
-	printk(KERN_NOTICE "HYSDN: procfs Rev. %s initialised\n", hysdn_getrev(hysdn_procconf_revision));
+	printk(KERN_NOTICE "HYSDN: procfs initialised\n");
 	return (0);
 }				/* hysdn_procconf_init */
 
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index f2b5bab..1f355bb 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1627,7 +1627,7 @@
 static int __init icn_init(void)
 {
 	char *p;
-	char rev[20];
+	char rev[21];
 
 	memset(&dev, 0, sizeof(icn_dev));
 	dev.memaddr = (membase & 0x0ffc000);
@@ -1638,6 +1638,7 @@
 
 	if ((p = strchr(revision, ':'))) {
 		strncpy(rev, p + 1, 20);
+		rev[20] = '\0';
 		p = strchr(rev, '$');
 		if (p)
 			*p = 0;
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index da3fa8d..666daf7 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -69,6 +69,7 @@
 		led_dat->pwm = pwm_request(cur_led->pwm_id,
 				cur_led->name);
 		if (IS_ERR(led_dat->pwm)) {
+			ret = PTR_ERR(led_dat->pwm);
 			dev_err(&pdev->dev, "unable to request PWM %d\n",
 					cur_led->pwm_id);
 			goto err;
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index 991d93b..ecc4bf3 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -99,7 +99,7 @@
 	struct led_classdev *led = dev_get_drvdata(dev);
 	struct gpio_trig_data *gpio_data = led->trigger_data;
 
-	return sprintf(buf, "%s\n", gpio_data->inverted ? "yes" : "no");
+	return sprintf(buf, "%u\n", gpio_data->inverted);
 }
 
 static ssize_t gpio_trig_inverted_store(struct device *dev,
@@ -107,16 +107,17 @@
 {
 	struct led_classdev *led = dev_get_drvdata(dev);
 	struct gpio_trig_data *gpio_data = led->trigger_data;
-	unsigned inverted;
+	unsigned long inverted;
 	int ret;
 
-	ret = sscanf(buf, "%u", &inverted);
-	if (ret < 1) {
-		dev_err(dev, "invalid value\n");
-		return -EINVAL;
-	}
+	ret = strict_strtoul(buf, 10, &inverted);
+	if (ret < 0)
+		return ret;
 
-	gpio_data->inverted = !!inverted;
+	if (inverted > 1)
+		return -EINVAL;
+
+	gpio_data->inverted = inverted;
 
 	/* After inverting, we need to update the LED. */
 	schedule_work(&gpio_data->work);
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 04b2212..d21578e 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -1137,7 +1137,7 @@
  */
 void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
 {
-	pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
+	pte_t *switcher_pte_page = __this_cpu_read(switcher_pte_pages);
 	pte_t regs_pte;
 
 #ifdef CONFIG_X86_PAE
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index b4eb675..9f1659c 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -90,8 +90,8 @@
 	 * meanwhile).  If that's not the case, we pretend everything in the
 	 * Guest has changed.
 	 */
-	if (__get_cpu_var(lg_last_cpu) != cpu || cpu->last_pages != pages) {
-		__get_cpu_var(lg_last_cpu) = cpu;
+	if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) {
+		__this_cpu_write(lg_last_cpu, cpu);
 		cpu->last_pages = pages;
 		cpu->changed = CHANGED_ALL;
 	}
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 2e041fd..f3a29f2 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -443,7 +443,7 @@
 	tries = 0;
 	for (;;) {
 		nr = i2c_master_recv(fcu, buf, nb);
-		if (nr > 0 || (nr < 0 && nr != ENODEV) || tries >= 100)
+		if (nr > 0 || (nr < 0 && nr != -ENODEV) || tries >= 100)
 			break;
 		msleep(10);
 		++tries;
@@ -464,7 +464,7 @@
 	tries = 0;
 	for (;;) {
 		nw = i2c_master_send(fcu, buf, nb);
-		if (nw > 0 || (nw < 0 && nw != EIO) || tries >= 100)
+		if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
 			break;
 		msleep(10);
 		++tries;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b76cfc8..0cc30ec 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -287,6 +287,7 @@
 	mddev_t *mddev = q->queuedata;
 	int rv;
 	int cpu;
+	unsigned int sectors;
 
 	if (mddev == NULL || mddev->pers == NULL
 	    || !mddev->ready) {
@@ -311,12 +312,16 @@
 	atomic_inc(&mddev->active_io);
 	rcu_read_unlock();
 
+	/*
+	 * Save the number of sectors now, since our bio can
+	 * go away inside make_request.
+	 */
+	sectors = bio_sectors(bio);
 	rv = mddev->pers->make_request(mddev, bio);
 
 	cpu = part_stat_lock();
 	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
-	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
-		      bio_sectors(bio));
+	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
 	part_stat_unlock();
 
 	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
@@ -1947,8 +1952,6 @@
 			__bdevname(dev, b));
 		return PTR_ERR(bdev);
 	}
-	if (!shared)
-		set_bit(AllReserved, &rdev->flags);
 	rdev->bdev = bdev;
 	return err;
 }
@@ -2465,6 +2468,9 @@
 		if (rdev->raid_disk != -1)
 			return -EBUSY;
 
+		if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
+			return -EBUSY;
+
 		if (rdev->mddev->pers->hot_add_disk == NULL)
 			return -EINVAL;
 
@@ -2610,12 +2616,11 @@
 
 			mddev_lock(mddev);
 			list_for_each_entry(rdev2, &mddev->disks, same_set)
-				if (test_bit(AllReserved, &rdev2->flags) ||
-				    (rdev->bdev == rdev2->bdev &&
-				     rdev != rdev2 &&
-				     overlaps(rdev->data_offset, rdev->sectors,
-					      rdev2->data_offset,
-					      rdev2->sectors))) {
+				if (rdev->bdev == rdev2->bdev &&
+				    rdev != rdev2 &&
+				    overlaps(rdev->data_offset, rdev->sectors,
+					     rdev2->data_offset,
+					     rdev2->sectors)) {
 					overlap = 1;
 					break;
 				}
@@ -5578,6 +5583,8 @@
 	mddev->delta_disks = raid_disks - mddev->raid_disks;
 
 	rv = mddev->pers->check_reshape(mddev);
+	if (rv < 0)
+		mddev->delta_disks = 0;
 	return rv;
 }
 
@@ -6985,9 +6992,6 @@
 	} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
 		mddev->resync_min = mddev->curr_resync_completed;
 	mddev->curr_resync = 0;
-	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
-		mddev->curr_resync_completed = 0;
-	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
 	wake_up(&resync_wait);
 	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
@@ -7028,7 +7032,7 @@
 			}
 		}
 
-	if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) {
+	if (mddev->degraded && !mddev->recovery_disabled) {
 		list_for_each_entry(rdev, &mddev->disks, same_set) {
 			if (rdev->raid_disk >= 0 &&
 			    !test_bit(In_sync, &rdev->flags) &&
@@ -7151,7 +7155,20 @@
 			/* Only thing we do on a ro array is remove
 			 * failed devices.
 			 */
-			remove_and_add_spares(mddev);
+			mdk_rdev_t *rdev;
+			list_for_each_entry(rdev, &mddev->disks, same_set)
+				if (rdev->raid_disk >= 0 &&
+				    !test_bit(Blocked, &rdev->flags) &&
+				    test_bit(Faulty, &rdev->flags) &&
+				    atomic_read(&rdev->nr_pending)==0) {
+					if (mddev->pers->hot_remove_disk(
+						    mddev, rdev->raid_disk)==0) {
+						char nm[20];
+						sprintf(nm,"rd%d", rdev->raid_disk);
+						sysfs_remove_link(&mddev->kobj, nm);
+						rdev->raid_disk = -1;
+					}
+				}
 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 			goto unlock;
 		}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index eec517c..7e90b85 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -93,8 +93,6 @@
 #define	Faulty		1		/* device is known to have a fault */
 #define	In_sync		2		/* device is in_sync with rest of array */
 #define	WriteMostly	4		/* Avoid reading if at all possible */
-#define	AllReserved	6		/* If whole device is reserved for
-					 * one array */
 #define	AutoDetected	7		/* added by auto-detect */
 #define Blocked		8		/* An error occured on an externally
 					 * managed array, don't allow writes
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a39f4c3..637a968 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -179,6 +179,14 @@
 			rdev1->new_raid_disk = j;
 		}
 
+		if (mddev->level == 1) {
+			/* taking over a raid1 array -
+			 * we have only one active disk
+			 */
+			j = 0;
+			rdev1->new_raid_disk = j;
+		}
+
 		if (j < 0 || j >= mddev->raid_disks) {
 			printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
 			       "aborting!\n", mdname(mddev), j);
@@ -644,12 +652,38 @@
 	return priv_conf;
 }
 
+static void *raid0_takeover_raid1(mddev_t *mddev)
+{
+	raid0_conf_t *priv_conf;
+
+	/* Check layout:
+	 *  - (N - 1) mirror drives must already be faulty
+	 */
+	if ((mddev->raid_disks - 1) != mddev->degraded) {
+		printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must already be faulty!\n",
+		       mdname(mddev));
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Set new parameters */
+	mddev->new_level = 0;
+	mddev->new_layout = 0;
+	mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
+	mddev->delta_disks = 1 - mddev->raid_disks;
+	/* make sure it will be not marked as dirty */
+	mddev->recovery_cp = MaxSector;
+
+	create_strip_zones(mddev, &priv_conf);
+	return priv_conf;
+}
+
 static void *raid0_takeover(mddev_t *mddev)
 {
 	/* raid0 can take over:
 	 *  raid4 - if all data disks are active.
 	 *  raid5 - providing it is Raid4 layout and one disk is faulty
 	 *  raid10 - assuming we have all necessary active disks
+	 *  raid1 - with (N - 1) mirror drives faulty
 	 */
 	if (mddev->level == 4)
 		return raid0_takeover_raid45(mddev);
@@ -665,6 +699,12 @@
 	if (mddev->level == 10)
 		return raid0_takeover_raid10(mddev);
 
+	if (mddev->level == 1)
+		return raid0_takeover_raid1(mddev);
+
+	printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
+		mddev->level);
+
 	return ERR_PTR(-EINVAL);
 }
 
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 69b6595..3b607b2 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2463,11 +2463,13 @@
 	mddev->recovery_cp = MaxSector;
 
 	conf = setup_conf(mddev);
-	if (!IS_ERR(conf))
+	if (!IS_ERR(conf)) {
 		list_for_each_entry(rdev, &mddev->disks, same_set)
 			if (rdev->raid_disk >= 0)
 				rdev->new_raid_disk = rdev->raid_disk * 2;
-		
+		conf->barrier = 1;
+	}
+
 	return conf;
 }
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 5044bab..7028128 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5517,7 +5517,6 @@
 	raid5_conf_t *conf = mddev->private;
 	mdk_rdev_t *rdev;
 	int spares = 0;
-	int added_devices = 0;
 	unsigned long flags;
 
 	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
@@ -5527,8 +5526,8 @@
 		return -ENOSPC;
 
 	list_for_each_entry(rdev, &mddev->disks, same_set)
-		if ((rdev->raid_disk < 0 || rdev->raid_disk >= conf->raid_disks)
-		     && !test_bit(Faulty, &rdev->flags))
+		if (!test_bit(In_sync, &rdev->flags)
+		    && !test_bit(Faulty, &rdev->flags))
 			spares++;
 
 	if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
@@ -5571,34 +5570,35 @@
 	 * to correctly record the "partially reconstructed" state of
 	 * such devices during the reshape and confusion could result.
 	 */
-	if (mddev->delta_disks >= 0)
-	    list_for_each_entry(rdev, &mddev->disks, same_set)
-		if (rdev->raid_disk < 0 &&
-		    !test_bit(Faulty, &rdev->flags)) {
-			if (raid5_add_disk(mddev, rdev) == 0) {
-				char nm[20];
-				if (rdev->raid_disk >= conf->previous_raid_disks) {
-					set_bit(In_sync, &rdev->flags);
-					added_devices++;
-				} else
-					rdev->recovery_offset = 0;
-				sprintf(nm, "rd%d", rdev->raid_disk);
-				if (sysfs_create_link(&mddev->kobj,
-						      &rdev->kobj, nm))
-					/* Failure here is OK */;
-			} else
-				break;
-		} else if (rdev->raid_disk >= conf->previous_raid_disks
-			   && !test_bit(Faulty, &rdev->flags)) {
-			/* This is a spare that was manually added */
-			set_bit(In_sync, &rdev->flags);
-			added_devices++;
-		}
+	if (mddev->delta_disks >= 0) {
+		int added_devices = 0;
+		list_for_each_entry(rdev, &mddev->disks, same_set)
+			if (rdev->raid_disk < 0 &&
+			    !test_bit(Faulty, &rdev->flags)) {
+				if (raid5_add_disk(mddev, rdev) == 0) {
+					char nm[20];
+					if (rdev->raid_disk
+					    >= conf->previous_raid_disks) {
+						set_bit(In_sync, &rdev->flags);
+						added_devices++;
+					} else
+						rdev->recovery_offset = 0;
+					sprintf(nm, "rd%d", rdev->raid_disk);
+					if (sysfs_create_link(&mddev->kobj,
+							      &rdev->kobj, nm))
+						/* Failure here is OK */;
+				}
+			} else if (rdev->raid_disk >= conf->previous_raid_disks
+				   && !test_bit(Faulty, &rdev->flags)) {
+				/* This is a spare that was manually added */
+				set_bit(In_sync, &rdev->flags);
+				added_devices++;
+			}
 
-	/* When a reshape changes the number of devices, ->degraded
-	 * is measured against the larger of the pre and post number of
-	 * devices.*/
-	if (mddev->delta_disks > 0) {
+		/* When a reshape changes the number of devices,
+		 * ->degraded is measured against the larger of the
+		 * pre and post number of devices.
+		 */
 		spin_lock_irqsave(&conf->device_lock, flags);
 		mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
 			- added_devices;
diff --git a/drivers/media/common/saa7146_core.c b/drivers/media/common/saa7146_core.c
index 982f000..9f47e38 100644
--- a/drivers/media/common/saa7146_core.c
+++ b/drivers/media/common/saa7146_core.c
@@ -452,7 +452,7 @@
 	INFO(("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x).\n", dev->mem, dev->revision, pci->irq, pci->subsystem_vendor, pci->subsystem_device));
 	dev->ext = ext;
 
-	mutex_init(&dev->lock);
+	mutex_init(&dev->v4l2_lock);
 	spin_lock_init(&dev->int_slock);
 	spin_lock_init(&dev->slock);
 
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index e3fedc6..1bd3dd7 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -15,18 +15,15 @@
 	}
 
 	/* is it free? */
-	mutex_lock(&dev->lock);
 	if (vv->resources & bit) {
 		DEB_D(("locked! vv->resources:0x%02x, we want:0x%02x\n",vv->resources,bit));
 		/* no, someone else uses it */
-		mutex_unlock(&dev->lock);
 		return 0;
 	}
 	/* it's free, grab it */
 	fh->resources  |= bit;
 	vv->resources |= bit;
 	DEB_D(("res: get 0x%02x, cur:0x%02x\n",bit,vv->resources));
-	mutex_unlock(&dev->lock);
 	return 1;
 }
 
@@ -37,11 +34,9 @@
 
 	BUG_ON((fh->resources & bits) != bits);
 
-	mutex_lock(&dev->lock);
 	fh->resources  &= ~bits;
 	vv->resources &= ~bits;
 	DEB_D(("res: put 0x%02x, cur:0x%02x\n",bits,vv->resources));
-	mutex_unlock(&dev->lock);
 }
 
 
@@ -396,7 +391,7 @@
 	.write		= fops_write,
 	.poll		= fops_poll,
 	.mmap		= fops_mmap,
-	.ioctl		= video_ioctl2,
+	.unlocked_ioctl	= video_ioctl2,
 };
 
 static void vv_callback(struct saa7146_dev *dev, unsigned long status)
@@ -505,6 +500,7 @@
 	vfd->fops = &video_fops;
 	vfd->ioctl_ops = &dev->ext_vv_data->ops;
 	vfd->release = video_device_release;
+	vfd->lock = &dev->v4l2_lock;
 	vfd->tvnorms = 0;
 	for (i = 0; i < dev->ext_vv_data->num_stds; i++)
 		vfd->tvnorms |= dev->ext_vv_data->stds[i].id;
diff --git a/drivers/media/common/saa7146_vbi.c b/drivers/media/common/saa7146_vbi.c
index 2d4533a..afe8580 100644
--- a/drivers/media/common/saa7146_vbi.c
+++ b/drivers/media/common/saa7146_vbi.c
@@ -412,7 +412,7 @@
 			    V4L2_BUF_TYPE_VBI_CAPTURE,
 			    V4L2_FIELD_SEQ_TB, // FIXME: does this really work?
 			    sizeof(struct saa7146_buf),
-			    file, NULL);
+			    file, &dev->v4l2_lock);
 
 	init_timer(&fh->vbi_read_timeout);
 	fh->vbi_read_timeout.function = vbi_read_timeout;
diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c
index 0ac5c61..9aafa4e 100644
--- a/drivers/media/common/saa7146_video.c
+++ b/drivers/media/common/saa7146_video.c
@@ -553,8 +553,6 @@
 		}
 	}
 
-	mutex_lock(&dev->lock);
-
 	/* ok, accept it */
 	vv->ov_fb = *fb;
 	vv->ov_fmt = fmt;
@@ -563,8 +561,6 @@
 		vv->ov_fb.fmt.bytesperline = vv->ov_fb.fmt.width * fmt->depth / 8;
 		DEB_D(("setting bytesperline to %d\n", vv->ov_fb.fmt.bytesperline));
 	}
-
-	mutex_unlock(&dev->lock);
 	return 0;
 }
 
@@ -649,8 +645,6 @@
 		return -EINVAL;
 	}
 
-	mutex_lock(&dev->lock);
-
 	switch (ctrl->type) {
 	case V4L2_CTRL_TYPE_BOOLEAN:
 	case V4L2_CTRL_TYPE_MENU:
@@ -693,7 +687,6 @@
 		/* fixme: we can support changing VFLIP and HFLIP here... */
 		if (IS_CAPTURE_ACTIVE(fh) != 0) {
 			DEB_D(("V4L2_CID_HFLIP while active capture.\n"));
-			mutex_unlock(&dev->lock);
 			return -EBUSY;
 		}
 		vv->hflip = c->value;
@@ -701,16 +694,13 @@
 	case V4L2_CID_VFLIP:
 		if (IS_CAPTURE_ACTIVE(fh) != 0) {
 			DEB_D(("V4L2_CID_VFLIP while active capture.\n"));
-			mutex_unlock(&dev->lock);
 			return -EBUSY;
 		}
 		vv->vflip = c->value;
 		break;
 	default:
-		mutex_unlock(&dev->lock);
 		return -EINVAL;
 	}
-	mutex_unlock(&dev->lock);
 
 	if (IS_OVERLAY_ACTIVE(fh) != 0) {
 		saa7146_stop_preview(fh);
@@ -902,22 +892,18 @@
 	err = vidioc_try_fmt_vid_overlay(file, fh, f);
 	if (0 != err)
 		return err;
-	mutex_lock(&dev->lock);
 	fh->ov.win    = f->fmt.win;
 	fh->ov.nclips = f->fmt.win.clipcount;
 	if (fh->ov.nclips > 16)
 		fh->ov.nclips = 16;
 	if (copy_from_user(fh->ov.clips, f->fmt.win.clips,
 				sizeof(struct v4l2_clip) * fh->ov.nclips)) {
-		mutex_unlock(&dev->lock);
 		return -EFAULT;
 	}
 
 	/* fh->ov.fh is used to indicate that we have valid overlay informations, too */
 	fh->ov.fh = fh;
 
-	mutex_unlock(&dev->lock);
-
 	/* check if our current overlay is active */
 	if (IS_OVERLAY_ACTIVE(fh) != 0) {
 		saa7146_stop_preview(fh);
@@ -976,8 +962,6 @@
 		}
 	}
 
-	mutex_lock(&dev->lock);
-
 	for (i = 0; i < dev->ext_vv_data->num_stds; i++)
 		if (*id & dev->ext_vv_data->stds[i].id)
 			break;
@@ -988,8 +972,6 @@
 		found = 1;
 	}
 
-	mutex_unlock(&dev->lock);
-
 	if (vv->ov_suspend != NULL) {
 		saa7146_start_preview(vv->ov_suspend);
 		vv->ov_suspend = NULL;
@@ -1354,7 +1336,7 @@
 			    V4L2_BUF_TYPE_VIDEO_CAPTURE,
 			    V4L2_FIELD_INTERLACED,
 			    sizeof(struct saa7146_buf),
-			    file, NULL);
+			    file, &dev->v4l2_lock);
 
 	return 0;
 }
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index 78b0895..6fc79f1 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -34,7 +34,7 @@
 config MEDIA_TUNER_CUSTOMISE
 	bool "Customize analog and hybrid tuner modules to build"
 	depends on MEDIA_TUNER
-	default y if EMBEDDED
+	default y if EXPERT
 	help
 	  This allows the user to deselect tuner drivers unnecessary
 	  for their hardware from the build. Use this option with care
diff --git a/drivers/media/common/tuners/tda8290.c b/drivers/media/common/tuners/tda8290.c
index c9062ce..bc6a677 100644
--- a/drivers/media/common/tuners/tda8290.c
+++ b/drivers/media/common/tuners/tda8290.c
@@ -95,8 +95,7 @@
 		msleep(20);
 	} else {
 		msg = disable;
-		tuner_i2c_xfer_send(&priv->i2c_props, msg, 1);
-		tuner_i2c_xfer_recv(&priv->i2c_props, &msg[1], 1);
+		tuner_i2c_xfer_send_recv(&priv->i2c_props, msg, 1, &msg[1], 1);
 
 		buf[2] = msg[1];
 		buf[2] &= ~0x04;
@@ -233,19 +232,22 @@
 		tuner_i2c_xfer_send(&priv->i2c_props, pll_bw_nom, 2);
 	}
 
+
 	tda8290_i2c_bridge(fe, 1);
 
 	if (fe->ops.tuner_ops.set_analog_params)
 		fe->ops.tuner_ops.set_analog_params(fe, params);
 
 	for (i = 0; i < 3; i++) {
-		tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1);
-		tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1);
+		tuner_i2c_xfer_send_recv(&priv->i2c_props,
+					 &addr_pll_stat, 1, &pll_stat, 1);
 		if (pll_stat & 0x80) {
-			tuner_i2c_xfer_send(&priv->i2c_props, &addr_adc_sat, 1);
-			tuner_i2c_xfer_recv(&priv->i2c_props, &adc_sat, 1);
-			tuner_i2c_xfer_send(&priv->i2c_props, &addr_agc_stat, 1);
-			tuner_i2c_xfer_recv(&priv->i2c_props, &agc_stat, 1);
+			tuner_i2c_xfer_send_recv(&priv->i2c_props,
+						 &addr_adc_sat, 1,
+						 &adc_sat, 1);
+			tuner_i2c_xfer_send_recv(&priv->i2c_props,
+						 &addr_agc_stat, 1,
+						 &agc_stat, 1);
 			tuner_dbg("tda8290 is locked, AGC: %d\n", agc_stat);
 			break;
 		} else {
@@ -259,20 +261,22 @@
 			   agc_stat, adc_sat, pll_stat & 0x80);
 		tuner_i2c_xfer_send(&priv->i2c_props, gainset_2, 2);
 		msleep(100);
-		tuner_i2c_xfer_send(&priv->i2c_props, &addr_agc_stat, 1);
-		tuner_i2c_xfer_recv(&priv->i2c_props, &agc_stat, 1);
-		tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1);
-		tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1);
+		tuner_i2c_xfer_send_recv(&priv->i2c_props,
+					 &addr_agc_stat, 1, &agc_stat, 1);
+		tuner_i2c_xfer_send_recv(&priv->i2c_props,
+					 &addr_pll_stat, 1, &pll_stat, 1);
 		if ((agc_stat > 115) || !(pll_stat & 0x80)) {
 			tuner_dbg("adjust gain, step 2. Agc: %d, lock: %d\n",
 				   agc_stat, pll_stat & 0x80);
 			if (priv->cfg.agcf)
 				priv->cfg.agcf(fe);
 			msleep(100);
-			tuner_i2c_xfer_send(&priv->i2c_props, &addr_agc_stat, 1);
-			tuner_i2c_xfer_recv(&priv->i2c_props, &agc_stat, 1);
-			tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1);
-			tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1);
+			tuner_i2c_xfer_send_recv(&priv->i2c_props,
+						 &addr_agc_stat, 1,
+						 &agc_stat, 1);
+			tuner_i2c_xfer_send_recv(&priv->i2c_props,
+						 &addr_pll_stat, 1,
+						 &pll_stat, 1);
 			if((agc_stat > 115) || !(pll_stat & 0x80)) {
 				tuner_dbg("adjust gain, step 3. Agc: %d\n", agc_stat);
 				tuner_i2c_xfer_send(&priv->i2c_props, adc_head_12, 2);
@@ -284,10 +288,12 @@
 
 	/* l/ l' deadlock? */
 	if(priv->tda8290_easy_mode & 0x60) {
-		tuner_i2c_xfer_send(&priv->i2c_props, &addr_adc_sat, 1);
-		tuner_i2c_xfer_recv(&priv->i2c_props, &adc_sat, 1);
-		tuner_i2c_xfer_send(&priv->i2c_props, &addr_pll_stat, 1);
-		tuner_i2c_xfer_recv(&priv->i2c_props, &pll_stat, 1);
+		tuner_i2c_xfer_send_recv(&priv->i2c_props,
+					 &addr_adc_sat, 1,
+					 &adc_sat, 1);
+		tuner_i2c_xfer_send_recv(&priv->i2c_props,
+					 &addr_pll_stat, 1,
+					 &pll_stat, 1);
 		if ((adc_sat > 20) || !(pll_stat & 0x80)) {
 			tuner_dbg("trying to resolve SECAM L deadlock\n");
 			tuner_i2c_xfer_send(&priv->i2c_props, agc_rst_on, 2);
@@ -307,8 +313,7 @@
 	struct tda8290_priv *priv = fe->analog_demod_priv;
 	unsigned char buf[] = { 0x30, 0x00 }; /* clb_stdbt */
 
-	tuner_i2c_xfer_send(&priv->i2c_props, &buf[0], 1);
-	tuner_i2c_xfer_recv(&priv->i2c_props, &buf[1], 1);
+	tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
 
 	if (enable)
 		buf[1] = 0x01;
@@ -323,8 +328,7 @@
 	struct tda8290_priv *priv = fe->analog_demod_priv;
 	unsigned char buf[] = { 0x01, 0x00 };
 
-	tuner_i2c_xfer_send(&priv->i2c_props, &buf[0], 1);
-	tuner_i2c_xfer_recv(&priv->i2c_props, &buf[1], 1);
+	tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
 
 	if (enable)
 		buf[1] = 0x01; /* rising edge sets regs 0x02 - 0x23 */
@@ -353,8 +357,7 @@
 	struct tda8290_priv *priv = fe->analog_demod_priv;
 	unsigned char buf[] = { 0x02, 0x00 }; /* DIV_FUNC */
 
-	tuner_i2c_xfer_send(&priv->i2c_props, &buf[0], 1);
-	tuner_i2c_xfer_recv(&priv->i2c_props, &buf[1], 1);
+	tuner_i2c_xfer_send_recv(&priv->i2c_props, &buf[0], 1, &buf[1], 1);
 
 	if (enable)
 		buf[1] &= ~0x40;
@@ -370,10 +373,10 @@
 	unsigned char set_gpio_cf[]    = { 0x44, 0x00 };
 	unsigned char set_gpio_val[]   = { 0x46, 0x00 };
 
-	tuner_i2c_xfer_send(&priv->i2c_props, &set_gpio_cf[0], 1);
-	tuner_i2c_xfer_recv(&priv->i2c_props, &set_gpio_cf[1], 1);
-	tuner_i2c_xfer_send(&priv->i2c_props, &set_gpio_val[0], 1);
-	tuner_i2c_xfer_recv(&priv->i2c_props, &set_gpio_val[1], 1);
+	tuner_i2c_xfer_send_recv(&priv->i2c_props,
+				 &set_gpio_cf[0], 1, &set_gpio_cf[1], 1);
+	tuner_i2c_xfer_send_recv(&priv->i2c_props,
+				 &set_gpio_val[0], 1, &set_gpio_val[1], 1);
 
 	set_gpio_cf[1] &= 0xf0; /* clear GPIO_0 bits 3-0 */
 
@@ -392,8 +395,7 @@
 	unsigned char hvpll_stat = 0x26;
 	unsigned char ret;
 
-	tuner_i2c_xfer_send(&priv->i2c_props, &hvpll_stat, 1);
-	tuner_i2c_xfer_recv(&priv->i2c_props, &ret, 1);
+	tuner_i2c_xfer_send_recv(&priv->i2c_props, &hvpll_stat, 1, &ret, 1);
 	return (ret & 0x01) ? 65535 : 0;
 }
 
@@ -413,8 +415,8 @@
 	tda8295_power(fe, 1);
 	tda8295_agc1_out(fe, 1);
 
-	tuner_i2c_xfer_send(&priv->i2c_props, &blanking_mode[0], 1);
-	tuner_i2c_xfer_recv(&priv->i2c_props, &blanking_mode[1], 1);
+	tuner_i2c_xfer_send_recv(&priv->i2c_props,
+				 &blanking_mode[0], 1, &blanking_mode[1], 1);
 
 	tda8295_set_video_std(fe);
 
@@ -447,8 +449,8 @@
 	unsigned char i2c_get_afc[1] = { 0x1B };
 	unsigned char afc = 0;
 
-	tuner_i2c_xfer_send(&priv->i2c_props, i2c_get_afc, ARRAY_SIZE(i2c_get_afc));
-	tuner_i2c_xfer_recv(&priv->i2c_props, &afc, 1);
+	tuner_i2c_xfer_send_recv(&priv->i2c_props,
+				 i2c_get_afc, ARRAY_SIZE(i2c_get_afc), &afc, 1);
 	return (afc & 0x80)? 65535:0;
 }
 
@@ -654,20 +656,26 @@
 static int tda8290_probe(struct tuner_i2c_props *i2c_props)
 {
 #define TDA8290_ID 0x89
-	unsigned char tda8290_id[] = { 0x1f, 0x00 };
+	u8 reg = 0x1f, id;
+	struct i2c_msg msg_read[] = {
+		{ .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg },
+		{ .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id },
+	};
 
 	/* detect tda8290 */
-	tuner_i2c_xfer_send(i2c_props, &tda8290_id[0], 1);
-	tuner_i2c_xfer_recv(i2c_props, &tda8290_id[1], 1);
+	if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
+		printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n",
+			       __func__, reg);
+		return -ENODEV;
+	}
 
-	if (tda8290_id[1] == TDA8290_ID) {
+	if (id == TDA8290_ID) {
 		if (debug)
 			printk(KERN_DEBUG "%s: tda8290 detected @ %d-%04x\n",
 			       __func__, i2c_adapter_id(i2c_props->adap),
 			       i2c_props->addr);
 		return 0;
 	}
-
 	return -ENODEV;
 }
 
@@ -675,16 +683,23 @@
 {
 #define TDA8295_ID 0x8a
 #define TDA8295C2_ID 0x8b
-	unsigned char tda8295_id[] = { 0x2f, 0x00 };
+	u8 reg = 0x2f, id;
+	struct i2c_msg msg_read[] = {
+		{ .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg },
+		{ .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id },
+	};
 
-	/* detect tda8295 */
-	tuner_i2c_xfer_send(i2c_props, &tda8295_id[0], 1);
-	tuner_i2c_xfer_recv(i2c_props, &tda8295_id[1], 1);
+	/* detect tda8295 */
+	if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
+		printk(KERN_WARNING "%s: tda8295 couldn't read register 0x%02x\n",
+			       __func__, reg);
+		return -ENODEV;
+	}
 
-	if ((tda8295_id[1] & 0xfe) == TDA8295_ID) {
+	if ((id & 0xfe) == TDA8295_ID) {
 		if (debug)
 			printk(KERN_DEBUG "%s: %s detected @ %d-%04x\n",
-			       __func__, (tda8295_id[1] == TDA8295_ID) ?
+			       __func__, (id == TDA8295_ID) ?
 			       "tda8295c1" : "tda8295c2",
 			       i2c_adapter_id(i2c_props->adap),
 			       i2c_props->addr);
@@ -740,9 +755,11 @@
 		       sizeof(struct analog_demod_ops));
 	}
 
-	if ((!(cfg) || (TDA829X_PROBE_TUNER == cfg->probe_tuner)) &&
-	    (tda829x_find_tuner(fe) < 0))
-		goto fail;
+	if (!(cfg) || (TDA829X_PROBE_TUNER == cfg->probe_tuner)) {
+		tda8295_power(fe, 1);
+		if (tda829x_find_tuner(fe) < 0)
+			goto fail;
+	}
 
 	switch (priv->ver) {
 	case TDA8290:
@@ -786,6 +803,8 @@
 	return fe;
 
 fail:
+	memset(&fe->ops.analog_ops, 0, sizeof(struct analog_demod_ops));
+
 	tda829x_release(fe);
 	return NULL;
 }
@@ -809,8 +828,8 @@
 	int i;
 
 	/* rule out tda9887, which would return the same byte repeatedly */
-	tuner_i2c_xfer_send(&i2c_props, soft_reset, 1);
-	tuner_i2c_xfer_recv(&i2c_props, buf, PROBE_BUFFER_SIZE);
+	tuner_i2c_xfer_send_recv(&i2c_props,
+				 soft_reset, 1, buf, PROBE_BUFFER_SIZE);
 	for (i = 1; i < PROBE_BUFFER_SIZE; i++) {
 		if (buf[i] != buf[0])
 			break;
@@ -827,13 +846,12 @@
 	/* fall back to old probing method */
 	tuner_i2c_xfer_send(&i2c_props, easy_mode_b, 2);
 	tuner_i2c_xfer_send(&i2c_props, soft_reset, 2);
-	tuner_i2c_xfer_send(&i2c_props, &addr_dto_lsb, 1);
-	tuner_i2c_xfer_recv(&i2c_props, &data, 1);
+	tuner_i2c_xfer_send_recv(&i2c_props, &addr_dto_lsb, 1, &data, 1);
 	if (data == 0) {
 		tuner_i2c_xfer_send(&i2c_props, easy_mode_g, 2);
 		tuner_i2c_xfer_send(&i2c_props, soft_reset, 2);
-		tuner_i2c_xfer_send(&i2c_props, &addr_dto_lsb, 1);
-		tuner_i2c_xfer_recv(&i2c_props, &data, 1);
+		tuner_i2c_xfer_send_recv(&i2c_props,
+					 &addr_dto_lsb, 1, &data, 1);
 		if (data == 0x7b) {
 			return 0;
 		}
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 8ca48f7..98ffb40 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -514,8 +514,8 @@
 	union {
 		u16 system16;
 		struct {
-			u8 system;
 			u8 not_system;
+			u8 system;
 		};
 	};
 	u8 data;
@@ -575,7 +575,7 @@
 		if ((poll_reply->system ^ poll_reply->not_system) != 0xff) {
 			deb_data("NEC extended protocol\n");
 			/* NEC extended code - 24 bits */
-			keycode = poll_reply->system16 << 8 | poll_reply->data;
+			keycode = be16_to_cpu(poll_reply->system16) << 8 | poll_reply->data;
 		} else {
 			deb_data("NEC normal protocol\n");
 			/* normal NEC code - 16 bits */
@@ -587,7 +587,7 @@
 		deb_data("RC5 protocol\n");
 		/* RC5 Protocol */
 		toggle = poll_reply->report_id;
-		keycode = poll_reply->system16 << 8 | poll_reply->data;
+		keycode = poll_reply->system << 8 | poll_reply->data;
 
 		break;
 	}
diff --git a/drivers/media/dvb/firewire/firedtv-rc.c b/drivers/media/dvb/firewire/firedtv-rc.c
index fcf3828..f82d4a9 100644
--- a/drivers/media/dvb/firewire/firedtv-rc.c
+++ b/drivers/media/dvb/firewire/firedtv-rc.c
@@ -172,7 +172,8 @@
 
 void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code)
 {
-	u16 *keycode = fdtv->remote_ctrl_dev->keycode;
+	struct input_dev *idev = fdtv->remote_ctrl_dev;
+	u16 *keycode = idev->keycode;
 
 	if (code >= 0x0300 && code <= 0x031f)
 		code = keycode[code - 0x0300];
@@ -188,6 +189,8 @@
 		return;
 	}
 
-	input_report_key(fdtv->remote_ctrl_dev, code, 1);
-	input_report_key(fdtv->remote_ctrl_dev, code, 0);
+	input_report_key(idev, code, 1);
+	input_sync(idev);
+	input_report_key(idev, code, 0);
+	input_sync(idev);
 }
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index ef3e43a..b8519ba 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -1,7 +1,7 @@
 config DVB_FE_CUSTOMISE
 	bool "Customise the frontend modules to build"
 	depends on DVB_CORE
-	default y if EMBEDDED
+	default y if EXPERT
 	help
 	  This allows the user to select/deselect frontend drivers for their
 	  hardware from the build.
diff --git a/drivers/media/dvb/frontends/af9013.c b/drivers/media/dvb/frontends/af9013.c
index ce22205..ba25fa0 100644
--- a/drivers/media/dvb/frontends/af9013.c
+++ b/drivers/media/dvb/frontends/af9013.c
@@ -334,11 +334,11 @@
 				if_sample_freq = 3300000; /* 3.3 MHz */
 				break;
 			case BANDWIDTH_7_MHZ:
-				if_sample_freq = 3800000; /* 3.8 MHz */
+				if_sample_freq = 3500000; /* 3.5 MHz */
 				break;
 			case BANDWIDTH_8_MHZ:
 			default:
-				if_sample_freq = 4300000; /* 4.3 MHz */
+				if_sample_freq = 4000000; /* 4.0 MHz */
 				break;
 			}
 		} else if (state->config.tuner == AF9013_TUNER_TDA18218) {
diff --git a/drivers/media/dvb/frontends/ix2505v.c b/drivers/media/dvb/frontends/ix2505v.c
index 6360c68..6c2e929 100644
--- a/drivers/media/dvb/frontends/ix2505v.c
+++ b/drivers/media/dvb/frontends/ix2505v.c
@@ -311,7 +311,7 @@
 	return fe;
 
 error:
-	ix2505v_release(fe);
+	kfree(state);
 	return NULL;
 }
 EXPORT_SYMBOL(ix2505v_attach);
diff --git a/drivers/media/dvb/frontends/mb86a20s.c b/drivers/media/dvb/frontends/mb86a20s.c
index d3ad3e7..cc4acd2 100644
--- a/drivers/media/dvb/frontends/mb86a20s.c
+++ b/drivers/media/dvb/frontends/mb86a20s.c
@@ -43,6 +43,8 @@
 	const struct mb86a20s_config *config;
 
 	struct dvb_frontend frontend;
+
+	bool need_init;
 };
 
 struct regdata {
@@ -318,7 +320,7 @@
 
 	rc = i2c_transfer(state->i2c, &msg, 1);
 	if (rc != 1) {
-		printk("%s: writereg rcor(rc == %i, reg == 0x%02x,"
+		printk("%s: writereg error (rc == %i, reg == 0x%02x,"
 			 " data == 0x%02x)\n", __func__, rc, reg, data);
 		return rc;
 	}
@@ -353,7 +355,7 @@
 	rc = i2c_transfer(state->i2c, msg, 2);
 
 	if (rc != 2) {
-		rc("%s: reg=0x%x (rcor=%d)\n", __func__, reg, rc);
+		rc("%s: reg=0x%x (error=%d)\n", __func__, reg, rc);
 		return rc;
 	}
 
@@ -382,23 +384,31 @@
 	/* Initialize the frontend */
 	rc = mb86a20s_writeregdata(state, mb86a20s_init);
 	if (rc < 0)
-		return rc;
+		goto err;
 
 	if (!state->config->is_serial) {
 		regD5 &= ~1;
 
 		rc = mb86a20s_writereg(state, 0x50, 0xd5);
 		if (rc < 0)
-			return rc;
+			goto err;
 		rc = mb86a20s_writereg(state, 0x51, regD5);
 		if (rc < 0)
-			return rc;
+			goto err;
 	}
 
 	if (fe->ops.i2c_gate_ctrl)
 		fe->ops.i2c_gate_ctrl(fe, 1);
 
-	return 0;
+err:
+	if (rc < 0) {
+		state->need_init = true;
+		printk(KERN_INFO "mb86a20s: Init failed. Will try again later\n");
+	} else {
+		state->need_init = false;
+		dprintk("Initialization succeeded.\n");
+	}
+	return rc;
 }
 
 static int mb86a20s_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
@@ -485,8 +495,22 @@
 
 	if (fe->ops.i2c_gate_ctrl)
 		fe->ops.i2c_gate_ctrl(fe, 1);
+	dprintk("Calling tuner set parameters\n");
 	fe->ops.tuner_ops.set_params(fe, p);
 
+	/*
+	 * Make it more reliable: if, for some reason, the initial
+	 * device initialization doesn't happen, initialize it when
+	 * the SBTVD parameters are adjusted.
+	 *
+	 * Unfortunately, due to a hard-to-track bug in tda829x/tda18271,
+	 * the AGC callback logic is not called during DVB attach time,
+	 * leaving mb86a20s uninitialized on the Kworld SBTVD.
+	 * So this hack is needed to make the Kworld SBTVD work.
+	 */
+	if (state->need_init)
+		mb86a20s_initfe(fe);
+
 	if (fe->ops.i2c_gate_ctrl)
 		fe->ops.i2c_gate_ctrl(fe, 0);
 	rc = mb86a20s_writeregdata(state, mb86a20s_reset_reception);
diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c
index 122c728..9fc1dd0 100644
--- a/drivers/media/dvb/ttpci/av7110_ca.c
+++ b/drivers/media/dvb/ttpci/av7110_ca.c
@@ -277,7 +277,7 @@
 	{
 		ca_slot_info_t *info=(ca_slot_info_t *)parg;
 
-		if (info->num > 1)
+		if (info->num < 0 || info->num > 1)
 			return -EINVAL;
 		av7110->ci_slot[info->num].num = info->num;
 		av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ?
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index 3c5a473..ecdffa6 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -151,20 +151,6 @@
 	  following ports will be probed: 0x20c, 0x30c, 0x24c, 0x34c, 0x248 and
 	  0x28c.
 
-config RADIO_GEMTEK_PCI
-	tristate "GemTek PCI Radio Card support"
-	depends on VIDEO_V4L2 && PCI
-	---help---
-	  Choose Y here if you have this PCI FM radio card.
-
-	  In order to control your radio card, you will need to use programs
-	  that are compatible with the Video for Linux API.  Information on
-	  this API and pointers to "v4l" programs may be found at
-	  <file:Documentation/video4linux/API.html>.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called radio-gemtek-pci.
-
 config RADIO_MAXIRADIO
 	tristate "Guillemot MAXI Radio FM 2000 radio"
 	depends on VIDEO_V4L2 && PCI
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index d297074..717656d 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -13,7 +13,6 @@
 obj-$(CONFIG_RADIO_RTRACK) += radio-aimslab.o
 obj-$(CONFIG_RADIO_ZOLTRIX) += radio-zoltrix.o
 obj-$(CONFIG_RADIO_GEMTEK) += radio-gemtek.o
-obj-$(CONFIG_RADIO_GEMTEK_PCI) += radio-gemtek-pci.o
 obj-$(CONFIG_RADIO_TRUST) += radio-trust.o
 obj-$(CONFIG_I2C_SI4713) += si4713-i2c.o
 obj-$(CONFIG_RADIO_SI4713) += radio-si4713.o
diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c
index 6cc5d13..4ce10db 100644
--- a/drivers/media/radio/radio-aimslab.c
+++ b/drivers/media/radio/radio-aimslab.c
@@ -31,6 +31,7 @@
 #include <linux/module.h>	/* Modules 			*/
 #include <linux/init.h>		/* Initdata			*/
 #include <linux/ioport.h>	/* request_region		*/
+#include <linux/delay.h>	/* msleep			*/
 #include <linux/videodev2.h>	/* kernel radio structs		*/
 #include <linux/version.h>	/* for KERNEL_VERSION MACRO	*/
 #include <linux/io.h>		/* outb, outb_p			*/
diff --git a/drivers/media/radio/radio-gemtek-pci.c b/drivers/media/radio/radio-gemtek-pci.c
deleted file mode 100644
index 28fa85b..0000000
--- a/drivers/media/radio/radio-gemtek-pci.c
+++ /dev/null
@@ -1,478 +0,0 @@
-/*
- ***************************************************************************
- *
- *     radio-gemtek-pci.c - Gemtek PCI Radio driver
- *     (C) 2001 Vladimir Shebordaev <vshebordaev@mail.ru>
- *
- ***************************************************************************
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License as
- *     published by the Free Software Foundation; either version 2 of
- *     the License, or (at your option) any later version.
- *
- *     This program is distributed in the hope that it will be useful,
- *     but WITHOUT ANY WARRANTY; without even the implied warranty of
- *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *     GNU General Public License for more details.
- *
- *     You should have received a copy of the GNU General Public
- *     License along with this program; if not, write to the Free
- *     Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139,
- *     USA.
- *
- ***************************************************************************
- *
- *     Gemtek Corp still silently refuses to release any specifications
- *     of their multimedia devices, so the protocol still has to be
- *     reverse engineered.
- *
- *     The v4l code was inspired by Jonas Munsin's  Gemtek serial line
- *     radio device driver.
- *
- *     Please, let me know if this piece of code was useful :)
- *
- *     TODO: multiple device support and portability were not tested
- *
- *     Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
- *
- ***************************************************************************
- */
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/videodev2.h>
-#include <linux/errno.h>
-#include <linux/version.h>      /* for KERNEL_VERSION MACRO     */
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-
-MODULE_AUTHOR("Vladimir Shebordaev <vshebordaev@mail.ru>");
-MODULE_DESCRIPTION("The video4linux driver for the Gemtek PCI Radio Card");
-MODULE_LICENSE("GPL");
-
-static int nr_radio = -1;
-static int mx = 1;
-
-module_param(mx, bool, 0);
-MODULE_PARM_DESC(mx, "single digit: 1 - turn off the turner upon module exit (default), 0 - do not");
-module_param(nr_radio, int, 0);
-MODULE_PARM_DESC(nr_radio, "video4linux device number to use");
-
-#define RADIO_VERSION KERNEL_VERSION(0, 0, 2)
-
-#ifndef PCI_VENDOR_ID_GEMTEK
-#define PCI_VENDOR_ID_GEMTEK 0x5046
-#endif
-
-#ifndef PCI_DEVICE_ID_GEMTEK_PR103
-#define PCI_DEVICE_ID_GEMTEK_PR103 0x1001
-#endif
-
-#ifndef GEMTEK_PCI_RANGE_LOW
-#define GEMTEK_PCI_RANGE_LOW (87*16000)
-#endif
-
-#ifndef GEMTEK_PCI_RANGE_HIGH
-#define GEMTEK_PCI_RANGE_HIGH (108*16000)
-#endif
-
-struct gemtek_pci {
-	struct v4l2_device v4l2_dev;
-	struct video_device vdev;
-	struct mutex lock;
-	struct pci_dev *pdev;
-
-	u32 iobase;
-	u32 length;
-
-	u32 current_frequency;
-	u8  mute;
-};
-
-static inline struct gemtek_pci *to_gemtek_pci(struct v4l2_device *v4l2_dev)
-{
-	return container_of(v4l2_dev, struct gemtek_pci, v4l2_dev);
-}
-
-static inline u8 gemtek_pci_out(u16 value, u32 port)
-{
-	outw(value, port);
-
-	return (u8)value;
-}
-
-#define _b0(v) (*((u8 *)&v))
-
-static void __gemtek_pci_cmd(u16 value, u32 port, u8 *last_byte, int keep)
-{
-	u8 byte = *last_byte;
-
-	if (!value) {
-		if (!keep)
-			value = (u16)port;
-		byte &= 0xfd;
-	} else
-		byte |= 2;
-
-	_b0(value) = byte;
-	outw(value, port);
-	byte |= 1;
-	_b0(value) = byte;
-	outw(value, port);
-	byte &= 0xfe;
-	_b0(value) = byte;
-	outw(value, port);
-
-	*last_byte = byte;
-}
-
-static inline void gemtek_pci_nil(u32 port, u8 *last_byte)
-{
-	__gemtek_pci_cmd(0x00, port, last_byte, false);
-}
-
-static inline void gemtek_pci_cmd(u16 cmd, u32 port, u8 *last_byte)
-{
-	__gemtek_pci_cmd(cmd, port, last_byte, true);
-}
-
-static void gemtek_pci_setfrequency(struct gemtek_pci *card, unsigned long frequency)
-{
-	int i;
-	u32 value = frequency / 200 + 856;
-	u16 mask = 0x8000;
-	u8 last_byte;
-	u32 port = card->iobase;
-
-	mutex_lock(&card->lock);
-	card->current_frequency = frequency;
-	last_byte = gemtek_pci_out(0x06, port);
-
-	i = 0;
-	do {
-		gemtek_pci_nil(port, &last_byte);
-		i++;
-	} while (i < 9);
-
-	i = 0;
-	do {
-		gemtek_pci_cmd(value & mask, port, &last_byte);
-		mask >>= 1;
-		i++;
-	} while (i < 16);
-
-	outw(0x10, port);
-	mutex_unlock(&card->lock);
-}
-
-
-static void gemtek_pci_mute(struct gemtek_pci *card)
-{
-	mutex_lock(&card->lock);
-	outb(0x1f, card->iobase);
-	card->mute = true;
-	mutex_unlock(&card->lock);
-}
-
-static void gemtek_pci_unmute(struct gemtek_pci *card)
-{
-	if (card->mute) {
-		gemtek_pci_setfrequency(card, card->current_frequency);
-		card->mute = false;
-	}
-}
-
-static int gemtek_pci_getsignal(struct gemtek_pci *card)
-{
-	int sig;
-
-	mutex_lock(&card->lock);
-	sig = (inb(card->iobase) & 0x08) ? 0 : 1;
-	mutex_unlock(&card->lock);
-	return sig;
-}
-
-static int vidioc_querycap(struct file *file, void *priv,
-					struct v4l2_capability *v)
-{
-	struct gemtek_pci *card = video_drvdata(file);
-
-	strlcpy(v->driver, "radio-gemtek-pci", sizeof(v->driver));
-	strlcpy(v->card, "GemTek PCI Radio", sizeof(v->card));
-	snprintf(v->bus_info, sizeof(v->bus_info), "PCI:%s", pci_name(card->pdev));
-	v->version = RADIO_VERSION;
-	v->capabilities = V4L2_CAP_TUNER | V4L2_CAP_RADIO;
-	return 0;
-}
-
-static int vidioc_g_tuner(struct file *file, void *priv,
-					struct v4l2_tuner *v)
-{
-	struct gemtek_pci *card = video_drvdata(file);
-
-	if (v->index > 0)
-		return -EINVAL;
-
-	strlcpy(v->name, "FM", sizeof(v->name));
-	v->type = V4L2_TUNER_RADIO;
-	v->rangelow = GEMTEK_PCI_RANGE_LOW;
-	v->rangehigh = GEMTEK_PCI_RANGE_HIGH;
-	v->rxsubchans = V4L2_TUNER_SUB_MONO;
-	v->capability = V4L2_TUNER_CAP_LOW;
-	v->audmode = V4L2_TUNER_MODE_MONO;
-	v->signal = 0xffff * gemtek_pci_getsignal(card);
-	return 0;
-}
-
-static int vidioc_s_tuner(struct file *file, void *priv,
-					struct v4l2_tuner *v)
-{
-	return v->index ? -EINVAL : 0;
-}
-
-static int vidioc_s_frequency(struct file *file, void *priv,
-					struct v4l2_frequency *f)
-{
-	struct gemtek_pci *card = video_drvdata(file);
-
-	if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO)
-		return -EINVAL;
-	if (f->frequency < GEMTEK_PCI_RANGE_LOW ||
-	    f->frequency > GEMTEK_PCI_RANGE_HIGH)
-		return -EINVAL;
-	gemtek_pci_setfrequency(card, f->frequency);
-	card->mute = false;
-	return 0;
-}
-
-static int vidioc_g_frequency(struct file *file, void *priv,
-					struct v4l2_frequency *f)
-{
-	struct gemtek_pci *card = video_drvdata(file);
-
-	if (f->tuner != 0)
-		return -EINVAL;
-	f->type = V4L2_TUNER_RADIO;
-	f->frequency = card->current_frequency;
-	return 0;
-}
-
-static int vidioc_queryctrl(struct file *file, void *priv,
-					struct v4l2_queryctrl *qc)
-{
-	switch (qc->id) {
-	case V4L2_CID_AUDIO_MUTE:
-		return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
-	case V4L2_CID_AUDIO_VOLUME:
-		return v4l2_ctrl_query_fill(qc, 0, 65535, 65535, 65535);
-	}
-	return -EINVAL;
-}
-
-static int vidioc_g_ctrl(struct file *file, void *priv,
-					struct v4l2_control *ctrl)
-{
-	struct gemtek_pci *card = video_drvdata(file);
-
-	switch (ctrl->id) {
-	case V4L2_CID_AUDIO_MUTE:
-		ctrl->value = card->mute;
-		return 0;
-	case V4L2_CID_AUDIO_VOLUME:
-		if (card->mute)
-			ctrl->value = 0;
-		else
-			ctrl->value = 65535;
-		return 0;
-	}
-	return -EINVAL;
-}
-
-static int vidioc_s_ctrl(struct file *file, void *priv,
-					struct v4l2_control *ctrl)
-{
-	struct gemtek_pci *card = video_drvdata(file);
-
-	switch (ctrl->id) {
-	case V4L2_CID_AUDIO_MUTE:
-		if (ctrl->value)
-			gemtek_pci_mute(card);
-		else
-			gemtek_pci_unmute(card);
-		return 0;
-	case V4L2_CID_AUDIO_VOLUME:
-		if (ctrl->value)
-			gemtek_pci_unmute(card);
-		else
-			gemtek_pci_mute(card);
-		return 0;
-	}
-	return -EINVAL;
-}
-
-static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
-{
-	*i = 0;
-	return 0;
-}
-
-static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
-{
-	return i ? -EINVAL : 0;
-}
-
-static int vidioc_g_audio(struct file *file, void *priv,
-					struct v4l2_audio *a)
-{
-	a->index = 0;
-	strlcpy(a->name, "Radio", sizeof(a->name));
-	a->capability = V4L2_AUDCAP_STEREO;
-	return 0;
-}
-
-static int vidioc_s_audio(struct file *file, void *priv,
-					struct v4l2_audio *a)
-{
-	return a->index ? -EINVAL : 0;
-}
-
-enum {
-	GEMTEK_PR103
-};
-
-static char *card_names[] __devinitdata = {
-	"GEMTEK_PR103"
-};
-
-static struct pci_device_id gemtek_pci_id[] =
-{
-	{ PCI_VENDOR_ID_GEMTEK, PCI_DEVICE_ID_GEMTEK_PR103,
-	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, GEMTEK_PR103 },
-	{ 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, gemtek_pci_id);
-
-static const struct v4l2_file_operations gemtek_pci_fops = {
-	.owner		= THIS_MODULE,
-	.unlocked_ioctl	= video_ioctl2,
-};
-
-static const struct v4l2_ioctl_ops gemtek_pci_ioctl_ops = {
-	.vidioc_querycap    = vidioc_querycap,
-	.vidioc_g_tuner     = vidioc_g_tuner,
-	.vidioc_s_tuner     = vidioc_s_tuner,
-	.vidioc_g_audio     = vidioc_g_audio,
-	.vidioc_s_audio     = vidioc_s_audio,
-	.vidioc_g_input     = vidioc_g_input,
-	.vidioc_s_input     = vidioc_s_input,
-	.vidioc_g_frequency = vidioc_g_frequency,
-	.vidioc_s_frequency = vidioc_s_frequency,
-	.vidioc_queryctrl   = vidioc_queryctrl,
-	.vidioc_g_ctrl      = vidioc_g_ctrl,
-	.vidioc_s_ctrl      = vidioc_s_ctrl,
-};
-
-static int __devinit gemtek_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
-{
-	struct gemtek_pci *card;
-	struct v4l2_device *v4l2_dev;
-	int res;
-
-	card = kzalloc(sizeof(struct gemtek_pci), GFP_KERNEL);
-	if (card == NULL) {
-		dev_err(&pdev->dev, "out of memory\n");
-		return -ENOMEM;
-	}
-
-	v4l2_dev = &card->v4l2_dev;
-	mutex_init(&card->lock);
-	card->pdev = pdev;
-
-	strlcpy(v4l2_dev->name, "gemtek_pci", sizeof(v4l2_dev->name));
-
-	res = v4l2_device_register(&pdev->dev, v4l2_dev);
-	if (res < 0) {
-		v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
-		kfree(card);
-		return res;
-	}
-
-	if (pci_enable_device(pdev))
-		goto err_pci;
-
-	card->iobase = pci_resource_start(pdev, 0);
-	card->length = pci_resource_len(pdev, 0);
-
-	if (request_region(card->iobase, card->length, card_names[pci_id->driver_data]) == NULL) {
-		v4l2_err(v4l2_dev, "i/o port already in use\n");
-		goto err_pci;
-	}
-
-	strlcpy(card->vdev.name, v4l2_dev->name, sizeof(card->vdev.name));
-	card->vdev.v4l2_dev = v4l2_dev;
-	card->vdev.fops = &gemtek_pci_fops;
-	card->vdev.ioctl_ops = &gemtek_pci_ioctl_ops;
-	card->vdev.release = video_device_release_empty;
-	video_set_drvdata(&card->vdev, card);
-
-	gemtek_pci_mute(card);
-
-	if (video_register_device(&card->vdev, VFL_TYPE_RADIO, nr_radio) < 0)
-		goto err_video;
-
-	v4l2_info(v4l2_dev, "Gemtek PCI Radio (rev. %d) found at 0x%04x-0x%04x.\n",
-		pdev->revision, card->iobase, card->iobase + card->length - 1);
-
-	return 0;
-
-err_video:
-	release_region(card->iobase, card->length);
-
-err_pci:
-	v4l2_device_unregister(v4l2_dev);
-	kfree(card);
-	return -ENODEV;
-}
-
-static void __devexit gemtek_pci_remove(struct pci_dev *pdev)
-{
-	struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev);
-	struct gemtek_pci *card = to_gemtek_pci(v4l2_dev);
-
-	video_unregister_device(&card->vdev);
-	v4l2_device_unregister(v4l2_dev);
-
-	release_region(card->iobase, card->length);
-
-	if (mx)
-		gemtek_pci_mute(card);
-
-	kfree(card);
-}
-
-static struct pci_driver gemtek_pci_driver = {
-	.name		= "gemtek_pci",
-	.id_table	= gemtek_pci_id,
-	.probe		= gemtek_pci_probe,
-	.remove		= __devexit_p(gemtek_pci_remove),
-};
-
-static int __init gemtek_pci_init(void)
-{
-	return pci_register_driver(&gemtek_pci_driver);
-}
-
-static void __exit gemtek_pci_exit(void)
-{
-	pci_unregister_driver(&gemtek_pci_driver);
-}
-
-module_init(gemtek_pci_init);
-module_exit(gemtek_pci_exit);
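A note on the tuner programming in the driver removed above: gemtek_pci_setfrequency() clocks out a 16-bit divisor bit by bit, and the divisor comes from "frequency / 200 + 856". A hedged sketch of that arithmetic, assuming the V4L2 low-range unit of 62.5 Hz (1/16000 MHz), so that 200 units are one 12.5 kHz step and 856 is the 10.7 MHz IF expressed in such steps; the helper below is illustrative only, not part of the driver:

#include <stdio.h>

/* frequency is in V4L2 low-range units of 62.5 Hz, as in the driver */
static unsigned int gemtek_divisor(unsigned long frequency)
{
	return frequency / 200 + 856;
}

int main(void)
{
	unsigned long f = 100 * 16000;	/* 100.0 MHz */

	/* (100 MHz + 10.7 MHz IF) / 12.5 kHz = 8856 */
	printf("divisor for 100.0 MHz: %u\n", gemtek_divisor(f));
	return 0;
}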
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 6459a22..5c2a905 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -77,8 +77,8 @@
 /* TEA5757 pin mappings */
 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
 
-#define FREQ_LO		(50 * 16000)
-#define FREQ_HI		(150 * 16000)
+#define FREQ_LO		(87 * 16000)
+#define FREQ_HI		(108 * 16000)
 
 #define FREQ_IF         171200 /* 10.7*16000   */
 #define FREQ_STEP       200    /* 12.5*16      */
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index dd6bd36..7ecc8e6 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1407,7 +1407,7 @@
 	.read		= wl1273_fm_fops_read,
 	.write		= wl1273_fm_fops_write,
 	.poll		= wl1273_fm_fops_poll,
-	.ioctl		= video_ioctl2,
+	.unlocked_ioctl	= video_ioctl2,
 	.open		= wl1273_fm_fops_open,
 	.release	= wl1273_fm_fops_release,
 };
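The .ioctl to .unlocked_ioctl switch above removes the BKL-protected ioctl path from this driver. A minimal sketch of the BKL-free pattern, assuming the driver wants the V4L2 core to serialize its file operations for it (none of the identifiers below are wl1273 code):

#include <linux/module.h>
#include <linux/mutex.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>

static DEFINE_MUTEX(drv_lock);

static const struct v4l2_file_operations drv_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
};

/* hypothetical init helper: pointing vdev->lock at a mutex makes
 * video_ioctl2 take that mutex around every ioctl on our behalf */
static void drv_setup_vdev(struct video_device *vdev)
{
	vdev->fops = &drv_fops;
	vdev->lock = &drv_lock;
}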
diff --git a/drivers/media/radio/si470x/radio-si470x-common.c b/drivers/media/radio/si470x/radio-si470x-common.c
index ac76dfe..60c176f 100644
--- a/drivers/media/radio/si470x/radio-si470x-common.c
+++ b/drivers/media/radio/si470x/radio-si470x-common.c
@@ -357,7 +357,8 @@
 		goto done;
 
 	/* sysconfig 1 */
-	radio->registers[SYSCONFIG1] = SYSCONFIG1_DE;
+	radio->registers[SYSCONFIG1] =
+		(de << 11) & SYSCONFIG1_DE;		/* DE*/
 	retval = si470x_set_register(radio, SYSCONFIG1);
 	if (retval < 0)
 		goto done;
@@ -687,12 +688,8 @@
 	/* driver constants */
 	strcpy(tuner->name, "FM");
 	tuner->type = V4L2_TUNER_RADIO;
-#if defined(CONFIG_USB_SI470X) || defined(CONFIG_USB_SI470X_MODULE)
 	tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO |
 			    V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO;
-#else
-	tuner->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
-#endif
 
 	/* range limits */
 	switch ((radio->registers[SYSCONFIG2] & SYSCONFIG2_BAND) >> 6) {
@@ -718,12 +715,10 @@
 		tuner->rxsubchans = V4L2_TUNER_SUB_MONO;
 	else
 		tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
-#if defined(CONFIG_USB_SI470X) || defined(CONFIG_USB_SI470X_MODULE)
 	/* If there is a reliable method of detecting an RDS channel,
 	   then this code should check for that before setting this
 	   RDS subchannel. */
 	tuner->rxsubchans |= V4L2_TUNER_SUB_RDS;
-#endif
 
 	/* mono/stereo selector */
 	if ((radio->registers[POWERCFG] & POWERCFG_MONO) == 0)
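The SYSCONFIG1 hunk above derives the de-emphasis bit from "de" instead of hard-coding it. A hedged illustration of the shift-and-mask, assuming SYSCONFIG1_DE is the single de-emphasis bit at position 11 and "de" is a 0/1 parameter; the constant below is an assumption for illustration, not copied from the driver:

/* assumed register layout: DE = bit 11 of SYSCONFIG1 */
#define SYSCONFIG1_DE	0x0800

static unsigned short sysconfig1_de_bits(unsigned int de)
{
	/* de == 0 leaves the bit clear, de == 1 sets it; all other bits drop out */
	return (de << 11) & SYSCONFIG1_DE;
}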
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index 80b3c31..1ac4913 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -446,27 +446,27 @@
 
 select_timeout:
 	if (dev->rx_fan_input_inuse) {
-		dev->rdev->rx_resolution = MS_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN);
+		dev->rdev->rx_resolution = US_TO_NS(ENE_FW_SAMPLE_PERIOD_FAN);
 
 		/* Fan input doesn't support timeouts, it just ends the
 			input with a maximum sample */
 		dev->rdev->min_timeout = dev->rdev->max_timeout =
-			MS_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK *
+			US_TO_NS(ENE_FW_SMPL_BUF_FAN_MSK *
 				ENE_FW_SAMPLE_PERIOD_FAN);
 	} else {
-		dev->rdev->rx_resolution = MS_TO_NS(sample_period);
+		dev->rdev->rx_resolution = US_TO_NS(sample_period);
 
 		/* Theoreticly timeout is unlimited, but we cap it
 		 * because it was seen that on one device, it
 		 * would stop sending spaces after around 250 msec.
 		 * Besides, this is close to 2^32 anyway and timeout is u32.
 		 */
-		dev->rdev->min_timeout = MS_TO_NS(127 * sample_period);
-		dev->rdev->max_timeout = MS_TO_NS(200000);
+		dev->rdev->min_timeout = US_TO_NS(127 * sample_period);
+		dev->rdev->max_timeout = US_TO_NS(200000);
 	}
 
 	if (dev->hw_learning_and_tx_capable)
-		dev->rdev->tx_resolution = MS_TO_NS(sample_period);
+		dev->rdev->tx_resolution = US_TO_NS(sample_period);
 
 	if (dev->rdev->timeout > dev->rdev->max_timeout)
 		dev->rdev->timeout = dev->rdev->max_timeout;
@@ -801,7 +801,7 @@
 
 		dbg("RX: %d (%s)", hw_sample, pulse ? "pulse" : "space");
 
-		ev.duration = MS_TO_NS(hw_sample);
+		ev.duration = US_TO_NS(hw_sample);
 		ev.pulse = pulse;
 		ir_raw_event_store_with_filter(dev->rdev, &ev);
 	}
@@ -821,7 +821,7 @@
 	dev->learning_mode_enabled = learning_mode_force;
 
 	/* Set reasonable default timeout */
-	dev->rdev->timeout = MS_TO_NS(150000);
+	dev->rdev->timeout = US_TO_NS(150000);
 }
 
 /* Upload all hardware settings at once. Used at load and resume time */
@@ -1004,6 +1004,10 @@
 	/* validate resources */
 	error = -ENODEV;
 
+	/* init these to -1, as 0 is valid for both */
+	dev->hw_io = -1;
+	dev->irq = -1;
+
 	if (!pnp_port_valid(pnp_dev, 0) ||
 	    pnp_port_len(pnp_dev, 0) < ENE_IO_SIZE)
 		goto error;
@@ -1072,6 +1076,8 @@
 		rdev->input_name = "ENE eHome Infrared Remote Transceiver";
 	}
 
+	dev->rdev = rdev;
+
 	ene_rx_setup_hw_buffer(dev);
 	ene_setup_default_settings(dev);
 	ene_setup_hw_settings(dev);
@@ -1083,7 +1089,6 @@
 	if (error < 0)
 		goto error;
 
-	dev->rdev = rdev;
 	ene_notice("driver has been succesfully loaded");
 	return 0;
 error:
diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
index c179baf..337a41d 100644
--- a/drivers/media/rc/ene_ir.h
+++ b/drivers/media/rc/ene_ir.h
@@ -201,8 +201,6 @@
 #define dbg_verbose(format, ...)	__dbg(2, format, ## __VA_ARGS__)
 #define dbg_regs(format, ...)		__dbg(3, format, ## __VA_ARGS__)
 
-#define MS_TO_NS(msec) ((msec) * 1000)
-
 struct ene_device {
 	struct pnp_dev *pnp_dev;
 	struct rc_dev *rdev;
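The MS_TO_NS() macro deleted above multiplied by 1000, so despite its name it really converted microseconds to nanoseconds. The rc-core US_TO_NS() helper that replaces it is assumed to expand the same way, which is why every call site converts one-for-one:

/* removed, misleadingly named */
#define MS_TO_NS(msec)	((msec) * 1000)

/* rc-core replacement (assumed definition) */
#define US_TO_NS(usec)	((usec) * 1000)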
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 6811512..e7dc6b4 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -988,7 +988,6 @@
 	int retval;
 	struct imon_context *ictx = rc->priv;
 	struct device *dev = ictx->dev;
-	bool pad_mouse;
 	unsigned char ir_proto_packet[] = {
 		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
 
@@ -1000,29 +999,20 @@
 	case RC_TYPE_RC6:
 		dev_dbg(dev, "Configuring IR receiver for MCE protocol\n");
 		ir_proto_packet[0] = 0x01;
-		pad_mouse = false;
 		break;
 	case RC_TYPE_UNKNOWN:
 	case RC_TYPE_OTHER:
 		dev_dbg(dev, "Configuring IR receiver for iMON protocol\n");
-		if (pad_stabilize && !nomouse)
-			pad_mouse = true;
-		else {
+		if (!pad_stabilize)
 			dev_dbg(dev, "PAD stabilize functionality disabled\n");
-			pad_mouse = false;
-		}
 		/* ir_proto_packet[0] = 0x00; // already the default */
 		rc_type = RC_TYPE_OTHER;
 		break;
 	default:
 		dev_warn(dev, "Unsupported IR protocol specified, overriding "
 			 "to iMON IR protocol\n");
-		if (pad_stabilize && !nomouse)
-			pad_mouse = true;
-		else {
+		if (!pad_stabilize)
 			dev_dbg(dev, "PAD stabilize functionality disabled\n");
-			pad_mouse = false;
-		}
 		/* ir_proto_packet[0] = 0x00; // already the default */
 		rc_type = RC_TYPE_OTHER;
 		break;
@@ -1035,7 +1025,7 @@
 		goto out;
 
 	ictx->rc_type = rc_type;
-	ictx->pad_mouse = pad_mouse;
+	ictx->pad_mouse = false;
 
 out:
 	return retval;
@@ -1517,7 +1507,7 @@
 			spin_unlock_irqrestore(&ictx->kc_lock, flags);
 			return;
 		} else {
-			ictx->pad_mouse = 0;
+			ictx->pad_mouse = false;
 			dev_dbg(dev, "mouse mode disabled, passing key value\n");
 		}
 	}
@@ -1756,7 +1746,6 @@
 	printk(KERN_CONT " (id 0x%02x)\n", ffdc_cfg_byte);
 
 	ictx->display_type = detected_display_type;
-	ictx->rdev->allowed_protos = allowed_protos;
 	ictx->rc_type = allowed_protos;
 }
 
@@ -1839,10 +1828,6 @@
 	rdev->allowed_protos = RC_TYPE_OTHER | RC_TYPE_RC6; /* iMON PAD or MCE */
 	rdev->change_protocol = imon_ir_change_protocol;
 	rdev->driver_name = MOD_NAME;
-	if (ictx->rc_type == RC_TYPE_RC6)
-		rdev->map_name = RC_MAP_IMON_MCE;
-	else
-		rdev->map_name = RC_MAP_IMON_PAD;
 
 	/* Enable front-panel buttons and/or knobs */
 	memcpy(ictx->usb_tx_buf, &fp_packet, sizeof(fp_packet));
@@ -1851,11 +1836,18 @@
 	if (ret)
 		dev_info(ictx->dev, "panel buttons/knobs setup failed\n");
 
-	if (ictx->product == 0xffdc)
+	if (ictx->product == 0xffdc) {
 		imon_get_ffdc_type(ictx);
+		rdev->allowed_protos = ictx->rc_type;
+	}
 
 	imon_set_display_type(ictx);
 
+	if (ictx->rc_type == RC_TYPE_RC6)
+		rdev->map_name = RC_MAP_IMON_MCE;
+	else
+		rdev->map_name = RC_MAP_IMON_PAD;
+
 	ret = rc_register_device(rdev);
 	if (ret < 0) {
 		dev_err(ictx->dev, "remote input dev register failed\n");
@@ -2108,18 +2100,6 @@
 		goto find_endpoint_failed;
 	}
 
-	ictx->idev = imon_init_idev(ictx);
-	if (!ictx->idev) {
-		dev_err(dev, "%s: input device setup failed\n", __func__);
-		goto idev_setup_failed;
-	}
-
-	ictx->rdev = imon_init_rdev(ictx);
-	if (!ictx->rdev) {
-		dev_err(dev, "%s: rc device setup failed\n", __func__);
-		goto rdev_setup_failed;
-	}
-
 	usb_fill_int_urb(ictx->rx_urb_intf0, ictx->usbdev_intf0,
 		usb_rcvintpipe(ictx->usbdev_intf0,
 			ictx->rx_endpoint_intf0->bEndpointAddress),
@@ -2133,13 +2113,25 @@
 		goto urb_submit_failed;
 	}
 
+	ictx->idev = imon_init_idev(ictx);
+	if (!ictx->idev) {
+		dev_err(dev, "%s: input device setup failed\n", __func__);
+		goto idev_setup_failed;
+	}
+
+	ictx->rdev = imon_init_rdev(ictx);
+	if (!ictx->rdev) {
+		dev_err(dev, "%s: rc device setup failed\n", __func__);
+		goto rdev_setup_failed;
+	}
+
 	return ictx;
 
-urb_submit_failed:
-	rc_unregister_device(ictx->rdev);
 rdev_setup_failed:
 	input_unregister_device(ictx->idev);
 idev_setup_failed:
+	usb_kill_urb(ictx->rx_urb_intf0);
+urb_submit_failed:
 find_endpoint_failed:
 	mutex_unlock(&ictx->lock);
 	usb_free_urb(tx_urb);
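The reshuffled imon.c error path above restores the usual goto-unwind convention: teardown labels mirror the setup order, and each failure jumps to the label that undoes only what already succeeded. A generic sketch with hypothetical names (not imon functions):

static int setup(struct ctx *c)		/* "struct ctx" is hypothetical */
{
	int ret;

	ret = step_a(c);		/* e.g. submit the RX URB */
	if (ret)
		goto err_a;

	ret = step_b(c);		/* e.g. register the input/rc devices */
	if (ret)
		goto err_b;

	return 0;

err_b:
	undo_step_a(c);			/* e.g. usb_kill_urb() */
err_a:
	return ret;
}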
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index f011c5d..1c5cc65 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -1,4 +1,4 @@
-/* ir-lirc-codec.c - ir-core to classic lirc interface bridge
+/* ir-lirc-codec.c - rc-core to classic lirc interface bridge
  *
  * Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com>
  *
@@ -47,6 +47,7 @@
 	/* Carrier reports */
 	if (ev.carrier_report) {
 		sample = LIRC_FREQUENCY(ev.carrier);
+		IR_dprintk(2, "carrier report (freq: %d)\n", sample);
 
 	/* Packet end */
 	} else if (ev.timeout) {
@@ -62,6 +63,7 @@
 			return 0;
 
 		sample = LIRC_TIMEOUT(ev.duration / 1000);
+		IR_dprintk(2, "timeout report (duration: %d)\n", sample);
 
 	/* Normal sample */
 	} else {
@@ -85,6 +87,8 @@
 
 		sample = ev.pulse ? LIRC_PULSE(ev.duration / 1000) :
 					LIRC_SPACE(ev.duration / 1000);
+		IR_dprintk(2, "delivering %uus %s to lirc_dev\n",
+			   TO_US(ev.duration), TO_STR(ev.pulse));
 	}
 
 	lirc_buffer_write(dev->raw->lirc.drv->rbuf,
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 185badd..73230ff 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -233,7 +233,7 @@
 
 /* used internally by the sysfs interface */
 u64
-ir_raw_get_allowed_protocols()
+ir_raw_get_allowed_protocols(void)
 {
 	u64 protocols;
 	mutex_lock(&ir_raw_handler_lock);
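The "()" to "(void)" change above is not cosmetic: in C an empty parameter list leaves the arguments unspecified rather than stating there are none, so the old form did not supply a real prototype. With "(void)" the compiler can check every caller:

u64 ir_raw_get_allowed_protocols();	/* old: parameters unspecified  */
u64 ir_raw_get_allowed_protocols(void);	/* new: explicitly no arguments */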
diff --git a/drivers/media/rc/keymaps/rc-dib0700-nec.c b/drivers/media/rc/keymaps/rc-dib0700-nec.c
index c59851b..7a5f530 100644
--- a/drivers/media/rc/keymaps/rc-dib0700-nec.c
+++ b/drivers/media/rc/keymaps/rc-dib0700-nec.c
@@ -19,35 +19,35 @@
 
 static struct rc_map_table dib0700_nec_table[] = {
 	/* Key codes for the Pixelview SBTVD remote */
-	{ 0x8613, KEY_MUTE },
-	{ 0x8612, KEY_POWER },
-	{ 0x8601, KEY_1 },
-	{ 0x8602, KEY_2 },
-	{ 0x8603, KEY_3 },
-	{ 0x8604, KEY_4 },
-	{ 0x8605, KEY_5 },
-	{ 0x8606, KEY_6 },
-	{ 0x8607, KEY_7 },
-	{ 0x8608, KEY_8 },
-	{ 0x8609, KEY_9 },
-	{ 0x8600, KEY_0 },
-	{ 0x860d, KEY_CHANNELUP },
-	{ 0x8619, KEY_CHANNELDOWN },
-	{ 0x8610, KEY_VOLUMEUP },
-	{ 0x860c, KEY_VOLUMEDOWN },
+	{ 0x866b13, KEY_MUTE },
+	{ 0x866b12, KEY_POWER },
+	{ 0x866b01, KEY_1 },
+	{ 0x866b02, KEY_2 },
+	{ 0x866b03, KEY_3 },
+	{ 0x866b04, KEY_4 },
+	{ 0x866b05, KEY_5 },
+	{ 0x866b06, KEY_6 },
+	{ 0x866b07, KEY_7 },
+	{ 0x866b08, KEY_8 },
+	{ 0x866b09, KEY_9 },
+	{ 0x866b00, KEY_0 },
+	{ 0x866b0d, KEY_CHANNELUP },
+	{ 0x866b19, KEY_CHANNELDOWN },
+	{ 0x866b10, KEY_VOLUMEUP },
+	{ 0x866b0c, KEY_VOLUMEDOWN },
 
-	{ 0x860a, KEY_CAMERA },
-	{ 0x860b, KEY_ZOOM },
-	{ 0x861b, KEY_BACKSPACE },
-	{ 0x8615, KEY_ENTER },
+	{ 0x866b0a, KEY_CAMERA },
+	{ 0x866b0b, KEY_ZOOM },
+	{ 0x866b1b, KEY_BACKSPACE },
+	{ 0x866b15, KEY_ENTER },
 
-	{ 0x861d, KEY_UP },
-	{ 0x861e, KEY_DOWN },
-	{ 0x860e, KEY_LEFT },
-	{ 0x860f, KEY_RIGHT },
+	{ 0x866b1d, KEY_UP },
+	{ 0x866b1e, KEY_DOWN },
+	{ 0x866b0e, KEY_LEFT },
+	{ 0x866b0f, KEY_RIGHT },
 
-	{ 0x8618, KEY_RECORD },
-	{ 0x861a, KEY_STOP },
+	{ 0x866b18, KEY_RECORD },
+	{ 0x866b1a, KEY_STOP },
 
 	/* Key codes for the EvolutePC TVWay+ remote */
 	{ 0x7a00, KEY_MENU },
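The widened Pixelview entries above store the full 24-bit NEC "extended" scancode rather than a truncated 16-bit one, so they keep matching when the decoder reports complete scancodes. A hedged sketch of how such a value is composed (0x86, 0x6b, 0x13 -> 0x866b13); the helper is illustrative, not rc-core code:

static u32 nec_ext_scancode(u8 addr_hi, u8 addr_lo, u8 cmd)
{
	return ((u32)addr_hi << 16) | ((u32)addr_lo << 8) | cmd;
}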
diff --git a/drivers/media/rc/keymaps/rc-rc6-mce.c b/drivers/media/rc/keymaps/rc-rc6-mce.c
index 3bf3337..2f5dc06 100644
--- a/drivers/media/rc/keymaps/rc-rc6-mce.c
+++ b/drivers/media/rc/keymaps/rc-rc6-mce.c
@@ -3,6 +3,9 @@
  *
  * Copyright (c) 2010 by Jarod Wilson <jarod@redhat.com>
  *
+ * See http://mediacenterguides.com/book/export/html/31 for details on
+ * key mappings.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -60,6 +63,9 @@
 	{ 0x800f0426, KEY_EPG },		/* Guide */
 	{ 0x800f0427, KEY_ZOOM },		/* Aspect */
 
+	{ 0x800f0432, KEY_MODE },		/* Visualization */
+	{ 0x800f0433, KEY_PRESENTATION },	/* Slide Show */
+	{ 0x800f0434, KEY_EJECTCD },
 	{ 0x800f043a, KEY_BRIGHTNESSUP },
 
 	{ 0x800f0446, KEY_TV },
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 0fef6ef..6df0a49 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -48,7 +48,6 @@
 #define USB_BUFLEN		32 /* USB reception buffer length */
 #define USB_CTRL_MSG_SZ		2  /* Size of usb ctrl msg on gen1 hw */
 #define MCE_G1_INIT_MSGS	40 /* Init messages on gen1 hw to throw out */
-#define MS_TO_NS(msec)		((msec) * 1000)
 
 /* MCE constants */
 #define MCE_CMDBUF_SIZE		384  /* MCE Command buffer length */
@@ -817,7 +816,7 @@
 	switch (ir->buf_in[index]) {
 	/* 2-byte return value commands */
 	case MCE_CMD_S_TIMEOUT:
-		ir->rc->timeout = MS_TO_NS((hi << 8 | lo) / 2);
+		ir->rc->timeout = US_TO_NS((hi << 8 | lo) / 2);
 		break;
 
 	/* 1-byte return value commands */
@@ -856,9 +855,10 @@
 			break;
 		case PARSE_IRDATA:
 			ir->rem--;
+			init_ir_raw_event(&rawir);
 			rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
 			rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
-					 * MS_TO_NS(MCE_TIME_UNIT);
+					 * US_TO_NS(MCE_TIME_UNIT);
 
 			dev_dbg(ir->dev, "Storing %s with duration %d\n",
 				rawir.pulse ? "pulse" : "space",
@@ -884,6 +884,8 @@
 					     i, ir->rem + 1, false);
 			if (ir->rem)
 				ir->parser_state = PARSE_IRDATA;
+			else
+				ir_raw_event_reset(ir->rc);
 			break;
 		}
 
@@ -1061,7 +1063,7 @@
 	rc->priv = ir;
 	rc->driver_type = RC_DRIVER_IR_RAW;
 	rc->allowed_protos = RC_TYPE_ALL;
-	rc->timeout = MS_TO_NS(1000);
+	rc->timeout = US_TO_NS(1000);
 	if (!ir->flags.no_tx) {
 		rc->s_tx_mask = mceusb_set_tx_mask;
 		rc->s_tx_carrier = mceusb_set_tx_carrier;
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index dd4caf8..273d9d6 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -460,7 +460,7 @@
 		return 0;
 	}
 
-	carrier = (count * 1000000) / duration;
+	carrier = MS_TO_NS(count) / duration;
 
 	if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
 		nvt_dbg("WTF? Carrier frequency out of range!");
@@ -612,8 +612,8 @@
 		sample = nvt->buf[i];
 
 		rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
-		rawir.duration = (sample & BUF_LEN_MASK)
-					* SAMPLE_PERIOD * 1000;
+		rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
+					  * SAMPLE_PERIOD);
 
 		if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
 			if (nvt->rawir.pulse == rawir.pulse)
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 72be8a0..512a2f4 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -458,21 +458,27 @@
 		index = ir_lookup_by_scancode(rc_map, scancode);
 	}
 
-	if (index >= rc_map->len) {
-		if (!(ke->flags & INPUT_KEYMAP_BY_INDEX))
-			IR_dprintk(1, "unknown key for scancode 0x%04x\n",
-				   scancode);
+	if (index < rc_map->len) {
+		entry = &rc_map->scan[index];
+
+		ke->index = index;
+		ke->keycode = entry->keycode;
+		ke->len = sizeof(entry->scancode);
+		memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
+
+	} else if (!(ke->flags & INPUT_KEYMAP_BY_INDEX)) {
+		/*
+		 * We do not really know the valid range of scancodes
+		 * so let's respond with KEY_RESERVED to anything we
+		 * do not have mapping for [yet].
+		 */
+		ke->index = index;
+		ke->keycode = KEY_RESERVED;
+	} else {
 		retval = -EINVAL;
 		goto out;
 	}
 
-	entry = &rc_map->scan[index];
-
-	ke->index = index;
-	ke->keycode = entry->keycode;
-	ke->len = sizeof(entry->scancode);
-	memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
-
 	retval = 0;
 
 out:
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 6e2911c..e435d94 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -164,7 +164,7 @@
 				sz->signal_start.tv_usec -
 				sz->signal_last.tv_usec);
 			rawir.duration -= sz->sum;
-			rawir.duration *= 1000;
+			rawir.duration = US_TO_NS(rawir.duration);
 			rawir.duration &= IR_MAX_DURATION;
 		}
 		sz_push(sz, rawir);
@@ -177,7 +177,7 @@
 	rawir.duration = ((int) value) * SZ_RESOLUTION;
 	rawir.duration += SZ_RESOLUTION / 2;
 	sz->sum += rawir.duration;
-	rawir.duration *= 1000;
+	rawir.duration = US_TO_NS(rawir.duration);
 	rawir.duration &= IR_MAX_DURATION;
 	sz_push(sz, rawir);
 }
@@ -197,7 +197,7 @@
 	rawir.duration = ((int) value) * SZ_RESOLUTION;
 	rawir.duration += SZ_RESOLUTION / 2;
 	sz->sum += rawir.duration;
-	rawir.duration *= 1000;
+	rawir.duration = US_TO_NS(rawir.duration);
 	sz_push(sz, rawir);
 }
 
@@ -273,6 +273,7 @@
 				if (sz->timeout_enabled)
 					sz_push(sz, rawir);
 				ir_raw_event_handle(sz->rdev);
+				ir_raw_event_reset(sz->rdev);
 			} else {
 				sz_push_full_space(sz, sz->buf_in[i]);
 			}
@@ -290,6 +291,7 @@
 		}
 	}
 
+	ir_raw_event_handle(sz->rdev);
 	usb_submit_urb(urb, GFP_ATOMIC);
 
 	return;
@@ -430,13 +432,13 @@
 	sz->decoder_state = PulseSpace;
 	/* FIXME: don't yet have a way to set this */
 	sz->timeout_enabled = true;
-	sz->rdev->timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) &
+	sz->rdev->timeout = ((US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION) &
 				IR_MAX_DURATION) | 0x03000000);
 	#if 0
 	/* not yet supported, depends on patches from maxim */
 	/* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */
-	sz->min_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000;
-	sz->max_timeout = SZ_TIMEOUT * SZ_RESOLUTION * 1000;
+	sz->min_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
+	sz->max_timeout = US_TO_NS(SZ_TIMEOUT * SZ_RESOLUTION);
 	#endif
 
 	do_gettimeofday(&sz->signal_start);
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index eb875af..aa02160 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -78,7 +78,7 @@
 
 config VIDEO_HELPER_CHIPS_AUTO
 	bool "Autoselect pertinent encoders/decoders and other helper chips"
-	default y if !EMBEDDED
+	default y if !EXPERT
 	---help---
 	  Most video cards may require additional modules to encode or
 	  decode audio/video standards. This option will autoselect
@@ -141,15 +141,6 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called tda9840.
 
-config VIDEO_TDA9875
-	tristate "Philips TDA9875 audio processor"
-	depends on VIDEO_V4L2 && I2C
-	---help---
-	  Support for tda9875 audio decoder chip found on some bt8xx boards.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called tda9875.
-
 config VIDEO_TEA6415C
 	tristate "Philips TEA6415C audio processor"
 	depends on I2C
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 81e38cb..a509d31 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -27,7 +27,6 @@
 obj-$(CONFIG_VIDEO_TUNER) += tuner.o
 obj-$(CONFIG_VIDEO_TVAUDIO) += tvaudio.o
 obj-$(CONFIG_VIDEO_TDA7432) += tda7432.o
-obj-$(CONFIG_VIDEO_TDA9875) += tda9875.o
 obj-$(CONFIG_VIDEO_SAA6588) += saa6588.o
 obj-$(CONFIG_VIDEO_TDA9840) += tda9840.o
 obj-$(CONFIG_VIDEO_TEA6415C) += tea6415c.o
diff --git a/drivers/media/video/adv7175.c b/drivers/media/video/adv7175.c
index f318b51..d2327db 100644
--- a/drivers/media/video/adv7175.c
+++ b/drivers/media/video/adv7175.c
@@ -303,11 +303,22 @@
 	return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7175, 0);
 }
 
+static int adv7175_s_power(struct v4l2_subdev *sd, int on)
+{
+	if (on)
+		adv7175_write(sd, 0x01, 0x00);
+	else
+		adv7175_write(sd, 0x01, 0x78);
+
+	return 0;
+}
+
 /* ----------------------------------------------------------------------- */
 
 static const struct v4l2_subdev_core_ops adv7175_core_ops = {
 	.g_chip_ident = adv7175_g_chip_ident,
 	.init = adv7175_init,
+	.s_power = adv7175_s_power,
 };
 
 static const struct v4l2_subdev_video_ops adv7175_video_ops = {
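With the s_power callback added above, a bridge driver can switch the adv7175 between its operating and power-down register settings through the standard subdev core op. A hedged usage sketch, where "sd" is the subdevice handle the bridge already holds:

static void bridge_set_encoder_power(struct v4l2_subdev *sd, int on)
{
	/* on = 1 writes 0x00, on = 0 writes 0x78 to register 0x01 above */
	v4l2_subdev_call(sd, core, s_power, on);
}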
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 49efcf6..7f58756 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -1373,7 +1373,6 @@
 		.gpiomute 	= 0x1800,
 		.audio_mode_gpio= fv2000s_audio,
 		.no_msp34xx	= 1,
-		.no_tda9875	= 1,
 		.needs_tvaudio  = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_PHILIPS_PAL,
@@ -1511,7 +1510,6 @@
 		.gpiomute 	= 0x09,
 		.needs_tvaudio  = 1,
 		.no_msp34xx	= 1,
-		.no_tda9875	= 1,
 		.pll		= PLL_28,
 		.tuner_type	= TUNER_PHILIPS_PAL,
 		.tuner_addr	= ADDR_UNSET,
@@ -1550,7 +1548,6 @@
 		.gpiomask2      = 0x07ff,
 		.muxsel         = MUXSEL(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3),
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.muxsel_hook    = rv605_muxsel,
@@ -1686,7 +1683,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 	[BTTV_BOARD_OSPREY1x0_848] = {
@@ -1699,7 +1695,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 
@@ -1714,7 +1709,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 	[BTTV_BOARD_OSPREY1x1] = {
@@ -1727,7 +1721,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 	[BTTV_BOARD_OSPREY1x1_SVID] = {
@@ -1740,7 +1733,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 	[BTTV_BOARD_OSPREY2xx] = {
@@ -1753,7 +1745,6 @@
 		.tuner_type	= TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 
@@ -1768,7 +1759,6 @@
 		.tuner_type	= TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 	[BTTV_BOARD_OSPREY2x0] = {
@@ -1781,7 +1771,6 @@
 		.tuner_type	= TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 	[BTTV_BOARD_OSPREY500] = {
@@ -1794,7 +1783,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 	[BTTV_BOARD_OSPREY540] = {
@@ -1805,7 +1793,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 
@@ -1820,7 +1807,6 @@
 		.tuner_type	= TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,      /* must avoid, conflicts with the bt860 */
 	},
 	[BTTV_BOARD_IDS_EAGLE] = {
@@ -1835,7 +1821,6 @@
 		.muxsel         = MUXSEL(2, 2, 2, 2),
 		.muxsel_hook    = eagle_muxsel,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.pll            = PLL_28,
 	},
 	[BTTV_BOARD_PINNACLESAT] = {
@@ -1846,7 +1831,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.muxsel         = MUXSEL(3, 1),
 		.pll            = PLL_28,
@@ -1897,7 +1881,6 @@
 		.svhs           = 2,
 		.gpiomask       = 0,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.muxsel         = MUXSEL(2, 0, 1),
 		.pll            = PLL_28,
@@ -1970,7 +1953,6 @@
 		/* Tuner, CVid, SVid, CVid over SVid connector */
 		.muxsel         = MUXSEL(2, 3, 1, 1),
 		.gpiomask       = 0,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.tuner_type     = TUNER_PHILIPS_PAL_I,
 		.tuner_addr	= ADDR_UNSET,
@@ -2017,7 +1999,6 @@
 		.muxsel         = MUXSEL(2,2,2,2, 3,3,3,3, 1,1,1,1, 0,0,0,0),
 		.muxsel_hook    = xguard_muxsel,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.pll            = PLL_28,
 	},
@@ -2029,7 +2010,6 @@
 		.svhs           = NO_SVHS,
 		.muxsel         = MUXSEL(2, 3, 1, 0),
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_ABSENT,
@@ -2134,7 +2114,6 @@
 		.svhs           = NO_SVHS,   /* card has no svhs */
 		.needs_tvaudio  = 0,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.gpiomask       = 0x00,
 		.muxsel         = MUXSEL(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
@@ -2156,7 +2135,6 @@
 	[BTTV_BOARD_TWINHAN_DST] = {
 		.name           = "Twinhan DST + clones",
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -2171,7 +2149,6 @@
 		/* Vid In, SVid In, Vid over SVid in connector */
 		.muxsel		= MUXSEL(3, 1, 1, 3),
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -2226,7 +2203,6 @@
 		.svhs           = NO_SVHS,
 		.muxsel         = MUXSEL(2, 3, 1, 0),
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.needs_tvaudio  = 0,
 		.tuner_type     = TUNER_ABSENT,
@@ -2278,7 +2254,6 @@
 		.gpiomask       = 0,
 		.gpiomask2      = 0x3C<<16,/*Set the GPIO[18]->GPIO[21] as output pin.==> drive the video inputs through analog multiplexers*/
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		/*878A input is always MUX0, see above.*/
 		.muxsel         = MUXSEL(2, 2, 2, 2),
@@ -2302,7 +2277,6 @@
 		.tuner_type     = TUNER_TEMIC_PAL,
 		.tuner_addr	= ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 	},
 	[BTTV_BOARD_AVDVBT_771] = {
 		/* Wolfram Joost <wojo@frokaschwei.de> */
@@ -2313,7 +2287,6 @@
 		.tuner_addr	= ADDR_UNSET,
 		.muxsel         = MUXSEL(3, 3),
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.pll            = PLL_28,
 		.has_dvb        = 1,
@@ -2329,7 +2302,6 @@
 		.svhs           = 1,
 		.muxsel         = MUXSEL(3, 1, 2, 0), /* Comp0, S-Video, ?, ? */
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.pll            = PLL_28,
 		.tuner_type     = TUNER_ABSENT,
@@ -2393,7 +2365,6 @@
 		/* Chris Pascoe <c.pascoe@itee.uq.edu.au> */
 		.name           = "DViCO FusionHDTV DVB-T Lite",
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.pll            = PLL_28,
 		.no_video       = 1,
@@ -2440,7 +2411,6 @@
 		.muxsel         = MUXSEL(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2),
 		.pll		= PLL_28,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432	= 1,
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -2478,7 +2448,6 @@
 		.pll		= PLL_28,
 		.no_msp34xx	= 1,
 		.no_tda7432	= 1,
-		.no_tda9875	= 1,
 		.muxsel_hook	= kodicom4400r_muxsel,
 	},
 	[BTTV_BOARD_KODICOM_4400R_SL] = {
@@ -2500,7 +2469,6 @@
 		.pll		= PLL_28,
 		.no_msp34xx	= 1,
 		.no_tda7432	= 1,
-		.no_tda9875	= 1,
 		.muxsel_hook	= kodicom4400r_muxsel,
 	},
 		/* ---- card 0x86---------------------------------- */
@@ -2530,7 +2498,6 @@
 		.gpiomux        = { 0x00400005, 0, 0x00000001, 0 },
 		.gpiomute 	= 0x00c00007,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.has_dvb        = 1,
 	},
@@ -2630,7 +2597,6 @@
 		.tuner_type     = TUNER_ABSENT,
 		.tuner_addr     = ADDR_UNSET,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 		/* ---- card 0x8d ---------------------------------- */
@@ -2658,7 +2624,6 @@
 		.muxsel		= MUXSEL(2, 3, 1, 1),
 		.gpiomux 	= { 100000, 100002, 100002, 100000 },
 		.no_msp34xx	= 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.pll		= PLL_28,
 		.tuner_type	= TUNER_TNF_5335MF,
@@ -2674,7 +2639,6 @@
 		.gpiomask	= 0x0f, /* old: 7 */
 		.muxsel		= MUXSEL(0, 1, 3, 2), /* Composite 0-3 */
 		.no_msp34xx	= 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 		.tuner_type	= TUNER_ABSENT,
 		.tuner_addr	= ADDR_UNSET,
@@ -2732,7 +2696,6 @@
 		.gpiomux        = { 0x00400005, 0, 0x00000001, 0 },
 		.gpiomute 	= 0x00c00007,
 		.no_msp34xx     = 1,
-		.no_tda9875     = 1,
 		.no_tda7432     = 1,
 	},
 	/* ---- card 0x95---------------------------------- */
@@ -2874,7 +2837,6 @@
 		.pll		= PLL_28,
 		.no_msp34xx	= 1,
 		.no_tda7432	= 1,
-		.no_tda9875	= 1,
 		.muxsel_hook    = gv800s_muxsel,
 	},
 		[BTTV_BOARD_GEOVISION_GV800S_SL] = {
@@ -2899,7 +2861,6 @@
 		.pll		= PLL_28,
 		.no_msp34xx	= 1,
 		.no_tda7432	= 1,
-		.no_tda9875	= 1,
 		.muxsel_hook    = gv800s_muxsel,
 	},
 	[BTTV_BOARD_PV183] = {
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index fd62bf1..c633359 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -234,7 +234,6 @@
 
 	/* i2c audio flags */
 	unsigned int no_msp34xx:1;
-	unsigned int no_tda9875:1;
 	unsigned int no_tda7432:1;
 	unsigned int needs_tvaudio:1;
 	unsigned int msp34xx_alt:1;
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 49f1b8f..55ffd60 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -2001,6 +2001,11 @@
 		.min_width = 320,
 		.min_height = 240,
 	};
+	struct i2c_board_info ov7670_info = {
+		.type = "ov7670",
+		.addr = 0x42,
+		.platform_data = &sensor_cfg,
+	};
 
 	/*
 	 * Start putting together one of our big camera structures.
@@ -2062,9 +2067,9 @@
 	if (dmi_check_system(olpc_xo1_dmi))
 		sensor_cfg.clock_speed = 45;
 
-	cam->sensor_addr = 0x42;
-	cam->sensor = v4l2_i2c_new_subdev_cfg(&cam->v4l2_dev, &cam->i2c_adapter,
-			"ov7670", 0, &sensor_cfg, cam->sensor_addr, NULL);
+	cam->sensor_addr = ov7670_info.addr;
+	cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev, &cam->i2c_adapter,
+			&ov7670_info, NULL);
 	if (cam->sensor == NULL) {
 		ret = -ENODEV;
 		goto out_smbus;
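The switch to v4l2_i2c_new_subdev_board() above lets the sensor configuration travel as ordinary I2C platform data, so the subdevice driver can pick it up in probe() via client->dev.platform_data; the cx25840 change further down relies on the same mechanism. A hedged sketch, with the config struct type name assumed for illustration:

static struct ov7670_config sensor_cfg = {	/* type name assumed */
	.min_width  = 320,
	.min_height = 240,
};

static struct i2c_board_info sensor_info = {
	.type		= "ov7670",
	.addr		= 0x42,
	.platform_data	= &sensor_cfg,
};

/* in the bridge driver's probe path: */
sd = v4l2_i2c_new_subdev_board(&cam->v4l2_dev, &cam->i2c_adapter,
			       &sensor_info, NULL);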
diff --git a/drivers/media/video/cpia2/cpia2.h b/drivers/media/video/cpia2/cpia2.h
index 916c13d..6d6d184 100644
--- a/drivers/media/video/cpia2/cpia2.h
+++ b/drivers/media/video/cpia2/cpia2.h
@@ -378,7 +378,7 @@
 
 struct camera_data {
 	/* locks */
-	struct mutex busy_lock;	/* guard against SMP multithreading */
+	struct mutex v4l2_lock;	/* serialize file operations */
 	struct v4l2_prio_state prio;
 
 	/* camera status */
diff --git a/drivers/media/video/cpia2/cpia2_core.c b/drivers/media/video/cpia2/cpia2_core.c
index 9606bc0..aaffca8 100644
--- a/drivers/media/video/cpia2/cpia2_core.c
+++ b/drivers/media/video/cpia2/cpia2_core.c
@@ -2247,7 +2247,7 @@
 
 
 	cam->present = 1;
-	mutex_init(&cam->busy_lock);
+	mutex_init(&cam->v4l2_lock);
 	init_waitqueue_head(&cam->wq_stream);
 
 	return cam;
@@ -2365,9 +2365,9 @@
 		char __user *buf, unsigned long count, int noblock)
 {
 	struct framebuf *frame;
-	if (!count) {
+
+	if (!count)
 		return 0;
-	}
 
 	if (!buf) {
 		ERR("%s: buffer NULL\n",__func__);
@@ -2379,17 +2379,12 @@
 		return -EINVAL;
 	}
 
-	/* make this _really_ smp and multithread-safe */
-	if (mutex_lock_interruptible(&cam->busy_lock))
-		return -ERESTARTSYS;
-
 	if (!cam->present) {
 		LOG("%s: camera removed\n",__func__);
-		mutex_unlock(&cam->busy_lock);
 		return 0;	/* EOF */
 	}
 
-	if(!cam->streaming) {
+	if (!cam->streaming) {
 		/* Start streaming */
 		cpia2_usb_stream_start(cam,
 				       cam->params.camera_state.stream_mode);
@@ -2398,42 +2393,31 @@
 	/* Copy cam->curbuff in case it changes while we're processing */
 	frame = cam->curbuff;
 	if (noblock && frame->status != FRAME_READY) {
-		mutex_unlock(&cam->busy_lock);
 		return -EAGAIN;
 	}
 
-	if(frame->status != FRAME_READY) {
-		mutex_unlock(&cam->busy_lock);
+	if (frame->status != FRAME_READY) {
+		mutex_unlock(&cam->v4l2_lock);
 		wait_event_interruptible(cam->wq_stream,
 			       !cam->present ||
 			       (frame = cam->curbuff)->status == FRAME_READY);
+		mutex_lock(&cam->v4l2_lock);
 		if (signal_pending(current))
 			return -ERESTARTSYS;
-		/* make this _really_ smp and multithread-safe */
-		if (mutex_lock_interruptible(&cam->busy_lock)) {
-			return -ERESTARTSYS;
-		}
-		if(!cam->present) {
-			mutex_unlock(&cam->busy_lock);
+		if (!cam->present)
 			return 0;
-		}
 	}
 
 	/* copy data to user space */
-	if (frame->length > count) {
-		mutex_unlock(&cam->busy_lock);
+	if (frame->length > count)
 		return -EFAULT;
-	}
-	if (copy_to_user(buf, frame->data, frame->length)) {
-		mutex_unlock(&cam->busy_lock);
+	if (copy_to_user(buf, frame->data, frame->length))
 		return -EFAULT;
-	}
 
 	count = frame->length;
 
 	frame->status = FRAME_EMPTY;
 
-	mutex_unlock(&cam->busy_lock);
 	return count;
 }
 
@@ -2447,17 +2431,13 @@
 {
 	unsigned int status=0;
 
-	if(!cam) {
+	if (!cam) {
 		ERR("%s: Internal error, camera_data not found!\n",__func__);
 		return POLLERR;
 	}
 
-	mutex_lock(&cam->busy_lock);
-
-	if(!cam->present) {
-		mutex_unlock(&cam->busy_lock);
+	if (!cam->present)
 		return POLLHUP;
-	}
 
 	if(!cam->streaming) {
 		/* Start streaming */
@@ -2465,16 +2445,13 @@
 				       cam->params.camera_state.stream_mode);
 	}
 
-	mutex_unlock(&cam->busy_lock);
 	poll_wait(filp, &cam->wq_stream, wait);
-	mutex_lock(&cam->busy_lock);
 
 	if(!cam->present)
 		status = POLLHUP;
 	else if(cam->curbuff->status == FRAME_READY)
 		status = POLLIN | POLLRDNORM;
 
-	mutex_unlock(&cam->busy_lock);
 	return status;
 }
 
@@ -2496,29 +2473,19 @@
 
 	DBG("mmap offset:%ld size:%ld\n", start_offset, size);
 
-	/* make this _really_ smp-safe */
-	if (mutex_lock_interruptible(&cam->busy_lock))
-		return -ERESTARTSYS;
-
-	if (!cam->present) {
-		mutex_unlock(&cam->busy_lock);
+	if (!cam->present)
 		return -ENODEV;
-	}
 
 	if (size > cam->frame_size*cam->num_frames  ||
 	    (start_offset % cam->frame_size) != 0 ||
-	    (start_offset+size > cam->frame_size*cam->num_frames)) {
-		mutex_unlock(&cam->busy_lock);
+	    (start_offset+size > cam->frame_size*cam->num_frames))
 		return -EINVAL;
-	}
 
 	pos = ((unsigned long) (cam->frame_buffer)) + start_offset;
 	while (size > 0) {
 		page = kvirt_to_pa(pos);
-		if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED)) {
-			mutex_unlock(&cam->busy_lock);
+		if (remap_pfn_range(vma, start, page >> PAGE_SHIFT, PAGE_SIZE, PAGE_SHARED))
 			return -EAGAIN;
-		}
 		start += PAGE_SIZE;
 		pos += PAGE_SIZE;
 		if (size > PAGE_SIZE)
@@ -2528,7 +2495,5 @@
 	}
 
 	cam->mmapped = true;
-	mutex_unlock(&cam->busy_lock);
 	return 0;
 }
-
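The cpia2 conversion above replaces the driver's private busy_lock with a v4l2_lock that the V4L2 core takes around every file operation (cam->vdev->lock is pointed at it further down). Any path that sleeps waiting for a frame therefore has to drop the lock first and re-take it afterwards, as the read and frame-wait paths now do:

/* pattern used above: let other file operations run while we wait */
mutex_unlock(&cam->v4l2_lock);
wait_event_interruptible(cam->wq_stream,
			 !cam->present ||
			 (frame = cam->curbuff)->status == FRAME_READY);
mutex_lock(&cam->v4l2_lock);
if (signal_pending(current))
	return -ERESTARTSYS;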
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c
index 7edf80b..9bad398 100644
--- a/drivers/media/video/cpia2/cpia2_v4l.c
+++ b/drivers/media/video/cpia2/cpia2_v4l.c
@@ -238,59 +238,40 @@
 static int cpia2_open(struct file *file)
 {
 	struct camera_data *cam = video_drvdata(file);
-	int retval = 0;
+	struct cpia2_fh *fh;
 
 	if (!cam) {
 		ERR("Internal error, camera_data not found!\n");
 		return -ENODEV;
 	}
 
-	if(mutex_lock_interruptible(&cam->busy_lock))
-		return -ERESTARTSYS;
+	if (!cam->present)
+		return -ENODEV;
 
-	if(!cam->present) {
-		retval = -ENODEV;
-		goto err_return;
+	if (cam->open_count == 0) {
+		if (cpia2_allocate_buffers(cam))
+			return -ENOMEM;
+
+		/* reset the camera */
+		if (cpia2_reset_camera(cam) < 0)
+			return -EIO;
+
+		cam->APP_len = 0;
+		cam->COM_len = 0;
 	}
 
-	if (cam->open_count > 0) {
-		goto skip_init;
-	}
-
-	if (cpia2_allocate_buffers(cam)) {
-		retval = -ENOMEM;
-		goto err_return;
-	}
-
-	/* reset the camera */
-	if (cpia2_reset_camera(cam) < 0) {
-		retval = -EIO;
-		goto err_return;
-	}
-
-	cam->APP_len = 0;
-	cam->COM_len = 0;
-
-skip_init:
-	{
-		struct cpia2_fh *fh = kmalloc(sizeof(*fh),GFP_KERNEL);
-		if(!fh) {
-			retval = -ENOMEM;
-			goto err_return;
-		}
-		file->private_data = fh;
-		fh->prio = V4L2_PRIORITY_UNSET;
-		v4l2_prio_open(&cam->prio, &fh->prio);
-		fh->mmapped = 0;
-	}
+	fh = kmalloc(sizeof(*fh), GFP_KERNEL);
+	if (!fh)
+		return -ENOMEM;
+	file->private_data = fh;
+	fh->prio = V4L2_PRIORITY_UNSET;
+	v4l2_prio_open(&cam->prio, &fh->prio);
+	fh->mmapped = 0;
 
 	++cam->open_count;
 
 	cpia2_dbg_dump_registers(cam);
-
-err_return:
-	mutex_unlock(&cam->busy_lock);
-	return retval;
+	return 0;
 }
 
 /******************************************************************************
@@ -304,15 +285,11 @@
 	struct camera_data *cam = video_get_drvdata(dev);
 	struct cpia2_fh *fh = file->private_data;
 
-	mutex_lock(&cam->busy_lock);
-
 	if (cam->present &&
-	    (cam->open_count == 1
-	     || fh->prio == V4L2_PRIORITY_RECORD
-	    )) {
+	    (cam->open_count == 1 || fh->prio == V4L2_PRIORITY_RECORD)) {
 		cpia2_usb_stream_stop(cam);
 
-		if(cam->open_count == 1) {
+		if (cam->open_count == 1) {
 			/* save camera state for later open */
 			cpia2_save_camera_state(cam);
 
@@ -321,26 +298,21 @@
 		}
 	}
 
-	{
-		if(fh->mmapped)
-			cam->mmapped = 0;
-		v4l2_prio_close(&cam->prio, fh->prio);
-		file->private_data = NULL;
-		kfree(fh);
-	}
+	if (fh->mmapped)
+		cam->mmapped = 0;
+	v4l2_prio_close(&cam->prio, fh->prio);
+	file->private_data = NULL;
+	kfree(fh);
 
 	if (--cam->open_count == 0) {
 		cpia2_free_buffers(cam);
 		if (!cam->present) {
 			video_unregister_device(dev);
-			mutex_unlock(&cam->busy_lock);
 			kfree(cam);
 			return 0;
 		}
 	}
 
-	mutex_unlock(&cam->busy_lock);
-
 	return 0;
 }
 
@@ -405,11 +377,11 @@
 			return 0;
 		}
 
-		mutex_unlock(&cam->busy_lock);
+		mutex_unlock(&cam->v4l2_lock);
 		wait_event_interruptible(cam->wq_stream,
 					 !cam->streaming ||
 					 frame->status == FRAME_READY);
-		mutex_lock(&cam->busy_lock);
+		mutex_lock(&cam->v4l2_lock);
 		if (signal_pending(current))
 			return -ERESTARTSYS;
 		if(!cam->present)
@@ -1293,11 +1265,11 @@
 	if(frame < 0) {
 		/* Wait for a frame to become available */
 		struct framebuf *cb=cam->curbuff;
-		mutex_unlock(&cam->busy_lock);
+		mutex_unlock(&cam->v4l2_lock);
 		wait_event_interruptible(cam->wq_stream,
 					 !cam->present ||
 					 (cb=cam->curbuff)->status == FRAME_READY);
-		mutex_lock(&cam->busy_lock);
+		mutex_lock(&cam->v4l2_lock);
 		if (signal_pending(current))
 			return -ERESTARTSYS;
 		if(!cam->present)
@@ -1337,14 +1309,8 @@
 	if (!cam)
 		return -ENOTTY;
 
-	/* make this _really_ smp-safe */
-	if (mutex_lock_interruptible(&cam->busy_lock))
-		return -ERESTARTSYS;
-
-	if (!cam->present) {
-		mutex_unlock(&cam->busy_lock);
+	if (!cam->present)
 		return -ENODEV;
-	}
 
 	/* Priority check */
 	switch (cmd) {
@@ -1352,10 +1318,8 @@
 	{
 		struct cpia2_fh *fh = file->private_data;
 		retval = v4l2_prio_check(&cam->prio, fh->prio);
-		if(retval) {
-			mutex_unlock(&cam->busy_lock);
+		if (retval)
 			return retval;
-		}
 		break;
 	}
 	default:
@@ -1529,7 +1493,6 @@
 		break;
 	}
 
-	mutex_unlock(&cam->busy_lock);
 	return retval;
 }
 
@@ -1596,7 +1559,7 @@
 	.release	= cpia2_close,
 	.read		= cpia2_v4l_read,
 	.poll		= cpia2_v4l_poll,
-	.ioctl		= cpia2_ioctl,
+	.unlocked_ioctl	= cpia2_ioctl,
 	.mmap		= cpia2_mmap,
 };
 
@@ -1620,6 +1583,7 @@
 
 	memcpy(cam->vdev, &cpia2_template, sizeof(cpia2_template));
 	video_set_drvdata(cam->vdev, cam);
+	cam->vdev->lock = &cam->v4l2_lock;
 
 	reset_camera_struct_v4l(cam);
 
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 133ec2b..944af8a 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -664,7 +664,7 @@
 {
 	snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in",
 		 cx->v4l2_dev.name);
-	cx->in_work_queue = create_singlethread_workqueue(cx->in_workq_name);
+	cx->in_work_queue = alloc_ordered_workqueue(cx->in_workq_name, 0);
 	if (cx->in_work_queue == NULL) {
 		CX18_ERR("Unable to create incoming mailbox handler thread\n");
 		return -ENOMEM;
@@ -672,18 +672,6 @@
 	return 0;
 }
 
-static int __devinit cx18_create_out_workq(struct cx18 *cx)
-{
-	snprintf(cx->out_workq_name, sizeof(cx->out_workq_name), "%s-out",
-		 cx->v4l2_dev.name);
-	cx->out_work_queue = create_workqueue(cx->out_workq_name);
-	if (cx->out_work_queue == NULL) {
-		CX18_ERR("Unable to create outgoing mailbox handler threads\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
 static void __devinit cx18_init_in_work_orders(struct cx18 *cx)
 {
 	int i;
@@ -710,16 +698,10 @@
 	mutex_init(&cx->epu2apu_mb_lock);
 	mutex_init(&cx->epu2cpu_mb_lock);
 
-	ret = cx18_create_out_workq(cx);
+	ret = cx18_create_in_workq(cx);
 	if (ret)
 		return ret;
 
-	ret = cx18_create_in_workq(cx);
-	if (ret) {
-		destroy_workqueue(cx->out_work_queue);
-		return ret;
-	}
-
 	cx18_init_in_work_orders(cx);
 
 	/* start counting open_id at 1 */
@@ -1107,7 +1089,6 @@
 	release_mem_region(cx->base_addr, CX18_MEM_SIZE);
 free_workqueues:
 	destroy_workqueue(cx->in_work_queue);
-	destroy_workqueue(cx->out_work_queue);
 err:
 	if (retval == 0)
 		retval = -ENODEV;
@@ -1259,7 +1240,6 @@
 	cx18_halt_firmware(cx);
 
 	destroy_workqueue(cx->in_work_queue);
-	destroy_workqueue(cx->out_work_queue);
 
 	cx18_streams_cleanup(cx, 1);
 
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index f6f3e50..306caac 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -617,9 +617,6 @@
 	struct cx18_in_work_order in_work_order[CX18_MAX_IN_WORK_ORDERS];
 	char epu_debug_str[256]; /* CX18_EPU_DEBUG is rare: use shared space */
 
-	struct workqueue_struct *out_work_queue;
-	char out_workq_name[12]; /* "cx18-NN-out" */
-
 	/* i2c */
 	struct i2c_adapter i2c_adap[2];
 	struct i2c_algo_bit_data i2c_algo[2];
diff --git a/drivers/media/video/cx18/cx18-streams.h b/drivers/media/video/cx18/cx18-streams.h
index 51765eb..713b0e6 100644
--- a/drivers/media/video/cx18/cx18-streams.h
+++ b/drivers/media/video/cx18/cx18-streams.h
@@ -42,8 +42,7 @@
 /* Related to submission of mdls to firmware */
 static inline void cx18_stream_load_fw_queue(struct cx18_stream *s)
 {
-	struct cx18 *cx = s->cx;
-	queue_work(cx->out_work_queue, &s->out_work_order);
+	schedule_work(&s->out_work_order);
 }
 
 static inline void cx18_stream_put_mdl_fw(struct cx18_stream *s,
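The cx18 changes above drop the dedicated "out" workqueue: the outgoing work items are independent, so they can ride the shared system workqueue via schedule_work(), while the incoming mailbox work keeps a private single-threaded queue, now created with alloc_ordered_workqueue(). A hedged sketch of the two styles ("order" is illustrative):

struct workqueue_struct *in_wq;

/* ordered queue: at most one item runs at a time, in queueing order */
in_wq = alloc_ordered_workqueue("cx18-0-in", 0);
if (!in_wq)
	return -ENOMEM;
queue_work(in_wq, &order->work);

/* independent work can simply use the system-wide workqueue */
schedule_work(&s->out_work_order);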
diff --git a/drivers/media/video/cx231xx/cx231xx-dvb.c b/drivers/media/video/cx231xx/cx231xx-dvb.c
index fe59a1c..363aa60 100644
--- a/drivers/media/video/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/video/cx231xx/cx231xx-dvb.c
@@ -28,7 +28,6 @@
 #include <media/videobuf-vmalloc.h>
 
 #include "xc5000.h"
-#include "dvb_dummy_fe.h"
 #include "s5h1432.h"
 #include "tda18271.h"
 #include "s5h1411.h"
@@ -619,7 +618,7 @@
 
 		if (dev->dvb->frontend == NULL) {
 			printk(DRIVER_NAME
-			       ": Failed to attach dummy front end\n");
+			       ": Failed to attach s5h1411 front end\n");
 			result = -EINVAL;
 			goto out_free;
 		}
@@ -665,7 +664,7 @@
 
 		if (dev->dvb->frontend == NULL) {
 			printk(DRIVER_NAME
-			       ": Failed to attach dummy front end\n");
+			       ": Failed to attach s5h1411 front end\n");
 			result = -EINVAL;
 			goto out_free;
 		}
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index f164618..6fc09dd 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -1682,20 +1682,6 @@
 	return 0;
 }
 
-static int cx25840_s_config(struct v4l2_subdev *sd, int irq, void *platform_data)
-{
-	struct cx25840_state *state = to_state(sd);
-	struct i2c_client *client = v4l2_get_subdevdata(sd);
-
-	if (platform_data) {
-		struct cx25840_platform_data *pdata = platform_data;
-
-		state->pvr150_workaround = pdata->pvr150_workaround;
-		set_input(client, state->vid_input, state->aud_input);
-	}
-	return 0;
-}
-
 static int cx23885_irq_handler(struct v4l2_subdev *sd, u32 status,
 			       bool *handled)
 {
@@ -1787,7 +1773,6 @@
 
 static const struct v4l2_subdev_core_ops cx25840_core_ops = {
 	.log_status = cx25840_log_status,
-	.s_config = cx25840_s_config,
 	.g_chip_ident = cx25840_g_chip_ident,
 	.g_ctrl = v4l2_subdev_g_ctrl,
 	.s_ctrl = v4l2_subdev_s_ctrl,
@@ -1974,7 +1959,6 @@
 	state->vid_input = CX25840_COMPOSITE7;
 	state->aud_input = CX25840_AUDIO8;
 	state->audclk_freq = 48000;
-	state->pvr150_workaround = 0;
 	state->audmode = V4L2_TUNER_MODE_LANG1;
 	state->vbi_line_offset = 8;
 	state->id = id;
@@ -2034,6 +2018,12 @@
 	v4l2_ctrl_cluster(2, &state->volume);
 	v4l2_ctrl_handler_setup(&state->hdl);
 
+	if (client->dev.platform_data) {
+		struct cx25840_platform_data *pdata = client->dev.platform_data;
+
+		state->pvr150_workaround = pdata->pvr150_workaround;
+	}
+
 	cx25840_ir_probe(sd);
 	return 0;
 }
diff --git a/drivers/media/video/davinci/vpif.c b/drivers/media/video/davinci/vpif.c
index 1f532e3..9f3bfc1 100644
--- a/drivers/media/video/davinci/vpif.c
+++ b/drivers/media/video/davinci/vpif.c
@@ -41,6 +41,183 @@
 
 void __iomem *vpif_base;
 
+/**
+ * ch_params: video standard configuration parameters for vpif
+ * The table must include all presets from supported subdevices.
+ */
+const struct vpif_channel_config_params ch_params[] = {
+	/* HDTV formats */
+	{
+		.name = "480p59_94",
+		.width = 720,
+		.height = 480,
+		.frm_fmt = 1,
+		.ycmux_mode = 0,
+		.eav2sav = 138-8,
+		.sav2eav = 720,
+		.l1 = 1,
+		.l3 = 43,
+		.l5 = 523,
+		.vsize = 525,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_480P59_94,
+	},
+	{
+		.name = "576p50",
+		.width = 720,
+		.height = 576,
+		.frm_fmt = 1,
+		.ycmux_mode = 0,
+		.eav2sav = 144-8,
+		.sav2eav = 720,
+		.l1 = 1,
+		.l3 = 45,
+		.l5 = 621,
+		.vsize = 625,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_576P50,
+	},
+	{
+		.name = "720p50",
+		.width = 1280,
+		.height = 720,
+		.frm_fmt = 1,
+		.ycmux_mode = 0,
+		.eav2sav = 700-8,
+		.sav2eav = 1280,
+		.l1 = 1,
+		.l3 = 26,
+		.l5 = 746,
+		.vsize = 750,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_720P50,
+	},
+	{
+		.name = "720p60",
+		.width = 1280,
+		.height = 720,
+		.frm_fmt = 1,
+		.ycmux_mode = 0,
+		.eav2sav = 370 - 8,
+		.sav2eav = 1280,
+		.l1 = 1,
+		.l3 = 26,
+		.l5 = 746,
+		.vsize = 750,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_720P60,
+	},
+	{
+		.name = "1080I50",
+		.width = 1920,
+		.height = 1080,
+		.frm_fmt = 0,
+		.ycmux_mode = 0,
+		.eav2sav = 720 - 8,
+		.sav2eav = 1920,
+		.l1 = 1,
+		.l3 = 21,
+		.l5 = 561,
+		.l7 = 563,
+		.l9 = 584,
+		.l11 = 1124,
+		.vsize = 1125,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_1080I50,
+	},
+	{
+		.name = "1080I60",
+		.width = 1920,
+		.height = 1080,
+		.frm_fmt = 0,
+		.ycmux_mode = 0,
+		.eav2sav = 280 - 8,
+		.sav2eav = 1920,
+		.l1 = 1,
+		.l3 = 21,
+		.l5 = 561,
+		.l7 = 563,
+		.l9 = 584,
+		.l11 = 1124,
+		.vsize = 1125,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_1080I60,
+	},
+	{
+		.name = "1080p60",
+		.width = 1920,
+		.height = 1080,
+		.frm_fmt = 1,
+		.ycmux_mode = 0,
+		.eav2sav = 280 - 8,
+		.sav2eav = 1920,
+		.l1 = 1,
+		.l3 = 42,
+		.l5 = 1122,
+		.vsize = 1125,
+		.capture_format = 0,
+		.vbi_supported = 0,
+		.hd_sd = 1,
+		.dv_preset = V4L2_DV_1080P60,
+	},
+
+	/* SDTV formats */
+	{
+		.name = "NTSC_M",
+		.width = 720,
+		.height = 480,
+		.frm_fmt = 0,
+		.ycmux_mode = 1,
+		.eav2sav = 268,
+		.sav2eav = 1440,
+		.l1 = 1,
+		.l3 = 23,
+		.l5 = 263,
+		.l7 = 266,
+		.l9 = 286,
+		.l11 = 525,
+		.vsize = 525,
+		.capture_format = 0,
+		.vbi_supported = 1,
+		.hd_sd = 0,
+		.stdid = V4L2_STD_525_60,
+	},
+	{
+		.name = "PAL_BDGHIK",
+		.width = 720,
+		.height = 576,
+		.frm_fmt = 0,
+		.ycmux_mode = 1,
+		.eav2sav = 280,
+		.sav2eav = 1440,
+		.l1 = 1,
+		.l3 = 23,
+		.l5 = 311,
+		.l7 = 313,
+		.l9 = 336,
+		.l11 = 624,
+		.vsize = 625,
+		.capture_format = 0,
+		.vbi_supported = 1,
+		.hd_sd = 0,
+		.stdid = V4L2_STD_625_50,
+	},
+};
+
+const unsigned int vpif_ch_params_count = ARRAY_SIZE(ch_params);
+
 static inline void vpif_wr_bit(u32 reg, u32 bit, u32 val)
 {
 	if (val)
diff --git a/drivers/media/video/davinci/vpif.h b/drivers/media/video/davinci/vpif.h
index ebd5c43..10550bd 100644
--- a/drivers/media/video/davinci/vpif.h
+++ b/drivers/media/video/davinci/vpif.h
@@ -577,12 +577,10 @@
 	char name[VPIF_MAX_NAME];	/* Name of the mode */
 	u16 width;			/* Indicates width of the image */
 	u16 height;			/* Indicates height of the image */
-	u8 fps;
-	u8 frm_fmt;			/* Indicates whether this is interlaced
-					 * or progressive format */
-	u8 ycmux_mode;			/* Indicates whether this mode requires
-					 * single or two channels */
-	u16 eav2sav;			/* length of sav 2 eav */
+	u8 frm_fmt;			/* Interlaced (0) or progressive (1) */
+	u8 ycmux_mode;			/* This mode requires one (0) or two (1)
+					 * channels */
+	u16 eav2sav;			/* length of eav 2 sav */
 	u16 sav2eav;			/* length of sav 2 eav */
 	u16 l1, l3, l5, l7, l9, l11;	/* Other parameter configurations */
 	u16 vsize;			/* Vertical size of the image */
@@ -590,10 +588,14 @@
 					 * is in BT or in CCD/CMOS */
 	u8  vbi_supported;		/* Indicates whether this mode
 					 * supports capturing vbi or not */
-	u8 hd_sd;
-	v4l2_std_id stdid;
+	u8 hd_sd;			/* HDTV (1) or SDTV (0) format */
+	v4l2_std_id stdid;		/* SDTV format */
+	u32 dv_preset;			/* HDTV format */
 };
 
+extern const unsigned int vpif_ch_params_count;
+extern const struct vpif_channel_config_params ch_params[];
+
 struct vpif_video_params;
 struct vpif_params;
 struct vpif_vbi_params;
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index 193abab..d93ad74 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -37,6 +37,7 @@
 #include <linux/slab.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
 
 #include "vpif_capture.h"
 #include "vpif.h"
@@ -81,20 +82,6 @@
 static struct device *vpif_dev;
 
 /**
- * ch_params: video standard configuration parameters for vpif
- */
-static const struct vpif_channel_config_params ch_params[] = {
-	{
-		"NTSC_M", 720, 480, 30, 0, 1, 268, 1440, 1, 23, 263, 266,
-		286, 525, 525, 0, 1, 0, V4L2_STD_525_60,
-	},
-	{
-		"PAL_BDGHIK", 720, 576, 25, 0, 1, 280, 1440, 1, 23, 311, 313,
-		336, 624, 625, 0, 1, 0, V4L2_STD_625_50,
-	},
-};
-
-/**
  * vpif_uservirt_to_phys : translate user/virtual address to phy address
  * @virtp: user/virtual address
  *
@@ -342,7 +329,7 @@
  * @dev_id: dev_id ptr
  *
  * It changes status of the captured buffer, takes next buffer from the queue
- * and sets its address in VPIF  registers
+ * and sets its address in VPIF registers
  */
 static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
 {
@@ -435,24 +422,31 @@
 	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
 	struct vpif_params *vpifparams = &ch->vpifparams;
 	const struct vpif_channel_config_params *config;
-	struct vpif_channel_config_params *std_info;
+	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
 	struct video_obj *vid_ch = &ch->video;
 	int index;
 
 	vpif_dbg(2, debug, "vpif_update_std_info\n");
 
-	std_info = &vpifparams->std_info;
-
-	for (index = 0; index < ARRAY_SIZE(ch_params); index++) {
+	for (index = 0; index < vpif_ch_params_count; index++) {
 		config = &ch_params[index];
-		if (config->stdid & vid_ch->stdid) {
-			memcpy(std_info, config, sizeof(*config));
-			break;
+		if (config->hd_sd == 0) {
+			vpif_dbg(2, debug, "SD format\n");
+			if (config->stdid & vid_ch->stdid) {
+				memcpy(std_info, config, sizeof(*config));
+				break;
+			}
+		} else {
+			vpif_dbg(2, debug, "HD format\n");
+			if (config->dv_preset == vid_ch->dv_preset) {
+				memcpy(std_info, config, sizeof(*config));
+				break;
+			}
 		}
 	}
 
 	/* standard not found */
-	if (index == ARRAY_SIZE(ch_params))
+	if (index == vpif_ch_params_count)
 		return -EINVAL;
 
 	common->fmt.fmt.pix.width = std_info->width;
@@ -462,6 +456,7 @@
 	common->fmt.fmt.pix.bytesperline = std_info->width;
 	vpifparams->video_params.hpitch = std_info->width;
 	vpifparams->video_params.storage_mode = std_info->frm_fmt;
+
 	return 0;
 }
 
@@ -757,7 +752,7 @@
 	struct video_obj *vid_ch;
 	struct channel_obj *ch;
 	struct vpif_fh *fh;
-	int i, ret = 0;
+	int i;
 
 	vpif_dbg(2, debug, "vpif_open\n");
 
@@ -766,9 +761,6 @@
 	vid_ch = &ch->video;
 	common = &ch->common[VPIF_VIDEO_INDEX];
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	if (NULL == ch->curr_subdev_info) {
 		/**
 		 * search through the sub device to see a registered
@@ -785,8 +777,7 @@
 		}
 		if (i == config->subdev_count) {
 			vpif_err("No sub device registered\n");
-			ret = -ENOENT;
-			goto exit;
+			return -ENOENT;
 		}
 	}
 
@@ -794,8 +785,7 @@
 	fh = kzalloc(sizeof(struct vpif_fh), GFP_KERNEL);
 	if (NULL == fh) {
 		vpif_err("unable to allocate memory for file handle object\n");
-		ret = -ENOMEM;
-		goto exit;
+		return -ENOMEM;
 	}
 
 	/* store pointer to fh in private_data member of filep */
@@ -815,9 +805,7 @@
 	/* Initialize priority of this instance to default priority */
 	fh->prio = V4L2_PRIORITY_UNSET;
 	v4l2_prio_open(&ch->prio, &fh->prio);
-exit:
-	mutex_unlock(&common->lock);
-	return ret;
+	return 0;
 }
 
 /**
@@ -837,9 +825,6 @@
 
 	common = &ch->common[VPIF_VIDEO_INDEX];
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	/* if this instance is doing IO */
 	if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
 		/* Reset io_usrs member of channel object */
@@ -863,9 +848,6 @@
 	/* Decrement channel usrs counter */
 	ch->usrs--;
 
-	/* unlock mutex on channel object */
-	mutex_unlock(&common->lock);
-
 	/* Close the priority */
 	v4l2_prio_close(&ch->prio, fh->prio);
 
@@ -890,7 +872,6 @@
 	struct channel_obj *ch = fh->channel;
 	struct common_obj *common;
 	u8 index = 0;
-	int ret = 0;
 
 	vpif_dbg(2, debug, "vpif_reqbufs\n");
 
@@ -913,13 +894,8 @@
 
 	common = &ch->common[index];
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
-	if (0 != common->io_usrs) {
-		ret = -EBUSY;
-		goto reqbuf_exit;
-	}
+	if (0 != common->io_usrs)
+		return -EBUSY;
 
 	/* Initialize videobuf queue as per the buffer type */
 	videobuf_queue_dma_contig_init(&common->buffer_queue,
@@ -928,7 +904,7 @@
 					    reqbuf->type,
 					    common->fmt.fmt.pix.field,
 					    sizeof(struct videobuf_buffer), fh,
-					    NULL);
+					    &common->lock);
 
 	/* Set io allowed member of file handle to TRUE */
 	fh->io_allowed[index] = 1;
@@ -939,11 +915,7 @@
 	INIT_LIST_HEAD(&common->dma_queue);
 
 	/* Allocate buffers */
-	ret = videobuf_reqbufs(&common->buffer_queue, reqbuf);
-
-reqbuf_exit:
-	mutex_unlock(&common->lock);
-	return ret;
+	return videobuf_reqbufs(&common->buffer_queue, reqbuf);
 }
 
 /**
@@ -1157,11 +1129,6 @@
 		return ret;
 	}
 
-	if (mutex_lock_interruptible(&common->lock)) {
-		ret = -ERESTARTSYS;
-		goto streamoff_exit;
-	}
-
 	/* If buffer queue is empty, return error */
 	if (list_empty(&common->dma_queue)) {
 		vpif_dbg(1, debug, "buffer queue is empty\n");
@@ -1240,13 +1207,10 @@
 		enable_channel1(1);
 	}
 	channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
-	mutex_unlock(&common->lock);
 	return ret;
 
 exit:
-	mutex_unlock(&common->lock);
-streamoff_exit:
-	ret = videobuf_streamoff(&common->buffer_queue);
+	videobuf_streamoff(&common->buffer_queue);
 	return ret;
 }
 
@@ -1284,9 +1248,6 @@
 		return -EINVAL;
 	}
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	/* disable channel */
 	if (VPIF_CHANNEL0_VIDEO == ch->channel_id) {
 		enable_channel0(0);
@@ -1304,8 +1265,6 @@
 	if (ret && (ret != -ENOIOCTLCMD))
 		vpif_dbg(1, debug, "stream off failed in subdev\n");
 
-	mutex_unlock(&common->lock);
-
 	return videobuf_streamoff(&common->buffer_queue);
 }
 
@@ -1381,21 +1340,16 @@
 {
 	struct vpif_fh *fh = priv;
 	struct channel_obj *ch = fh->channel;
-	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
 	int ret = 0;
 
 	vpif_dbg(2, debug, "vpif_querystd\n");
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	/* Call querystd function of decoder device */
 	ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video,
 				querystd, std_id);
 	if (ret < 0)
 		vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
 
-	mutex_unlock(&common->lock);
 	return ret;
 }
 
@@ -1451,16 +1405,14 @@
 	fh->initialized = 1;
 
 	/* Call encoder subdevice function to set the standard */
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	ch->video.stdid = *std_id;
+	ch->video.dv_preset = V4L2_DV_INVALID;
+	memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
 
 	/* Get the information about the standard */
 	if (vpif_update_std_info(ch)) {
-		ret = -EINVAL;
 		vpif_err("Error getting the standard info\n");
-		goto s_std_exit;
+		return -EINVAL;
 	}
 
 	/* Configure the default format information */
@@ -1471,9 +1423,6 @@
 				s_std, *std_id);
 	if (ret < 0)
 		vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
-
-s_std_exit:
-	mutex_unlock(&common->lock);
 	return ret;
 }
 
@@ -1567,9 +1516,6 @@
 		return -EINVAL;
 	}
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	/* first setup input path from sub device to vpif */
 	if (config->setup_input_path) {
 		ret = config->setup_input_path(ch->channel_id,
@@ -1578,7 +1524,7 @@
 			vpif_dbg(1, debug, "couldn't setup input path for the"
 				" sub device %s, for input index %d\n",
 				subdev_info->name, index);
-			goto exit;
+			return ret;
 		}
 	}
 
@@ -1589,7 +1535,7 @@
 					input, output, 0);
 		if (ret < 0) {
 			vpif_dbg(1, debug, "Failed to set input\n");
-			goto exit;
+			return ret;
 		}
 	}
 	vid_ch->input_idx = index;
@@ -1600,9 +1546,6 @@
 
 	/* update tvnorms from the sub device input info */
 	ch->video_dev->tvnorms = chan_cfg->inputs[index].input.std;
-
-exit:
-	mutex_unlock(&common->lock);
 	return ret;
 }
 
@@ -1671,11 +1614,7 @@
 		return -EINVAL;
 
 	/* Fill in the information about format */
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	*fmt = common->fmt;
-	mutex_unlock(&common->lock);
 	return 0;
 }
 
@@ -1694,7 +1633,7 @@
 	struct v4l2_pix_format *pixfmt;
 	int ret = 0;
 
-	vpif_dbg(2, debug, "VIDIOC_S_FMT\n");
+	vpif_dbg(2, debug, "%s\n", __func__);
 
 	/* If streaming is started, return error */
 	if (common->started) {
@@ -1723,12 +1662,7 @@
 	if (ret)
 		return ret;
 	/* store the format in the channel object */
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	common->fmt = *fmt;
-	mutex_unlock(&common->lock);
-
 	return 0;
 }
 
@@ -1807,6 +1741,306 @@
 	return 0;
 }
 
+/**
+ * vpif_enum_dv_presets() - ENUM_DV_PRESETS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_enum_dv_presets(struct file *file, void *priv,
+		struct v4l2_dv_enum_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+			video, enum_dv_presets, preset);
+}
+
+/**
+ * vpif_query_dv_preset() - QUERY_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_query_dv_preset(struct file *file, void *priv,
+		struct v4l2_dv_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+		       video, query_dv_preset, preset);
+}
+
+/**
+ * vpif_s_dv_preset() - S_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_s_dv_preset(struct file *file, void *priv,
+		struct v4l2_dv_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	int ret = 0;
+
+	if (common->started) {
+		vpif_dbg(1, debug, "streaming in progress\n");
+		return -EBUSY;
+	}
+
+	if ((VPIF_CHANNEL0_VIDEO == ch->channel_id) ||
+	    (VPIF_CHANNEL1_VIDEO == ch->channel_id)) {
+		if (!fh->initialized) {
+			vpif_dbg(1, debug, "Channel Busy\n");
+			return -EBUSY;
+		}
+	}
+
+	ret = v4l2_prio_check(&ch->prio, fh->prio);
+	if (ret)
+		return ret;
+
+	fh->initialized = 1;
+
+	/* Call encoder subdevice function to set the standard */
+	if (mutex_lock_interruptible(&common->lock))
+		return -ERESTARTSYS;
+
+	ch->video.dv_preset = preset->preset;
+	ch->video.stdid = V4L2_STD_UNKNOWN;
+	memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
+
+	/* Get the information about the standard */
+	if (vpif_update_std_info(ch)) {
+		vpif_dbg(1, debug, "Error getting the standard info\n");
+		ret = -EINVAL;
+	} else {
+		/* Configure the default format information */
+		vpif_config_format(ch);
+
+		ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+				video, s_dv_preset, preset);
+	}
+
+	mutex_unlock(&common->lock);
+
+	return ret;
+}
+
+/**
+ * vpif_g_dv_preset() - G_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_g_dv_preset(struct file *file, void *priv,
+		struct v4l2_dv_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	preset->preset = ch->video.dv_preset;
+
+	return 0;
+}
+
+/**
+ * vpif_s_dv_timings() - S_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_s_dv_timings(struct file *file, void *priv,
+		struct v4l2_dv_timings *timings)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct vpif_params *vpifparams = &ch->vpifparams;
+	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+	struct video_obj *vid_ch = &ch->video;
+	struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+	int ret;
+
+	if (timings->type != V4L2_DV_BT_656_1120) {
+		vpif_dbg(2, debug, "Timing type not defined\n");
+		return -EINVAL;
+	}
+
+	/* Configure subdevice timings, if any */
+	ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index],
+			video, s_dv_timings, timings);
+	if (ret == -ENOIOCTLCMD) {
+		vpif_dbg(2, debug, "Custom DV timings not supported by "
+				"subdevice\n");
+		return -EINVAL;
+	}
+	if (ret < 0) {
+		vpif_dbg(2, debug, "Error setting custom DV timings\n");
+		return ret;
+	}
+
+	if (!(timings->bt.width && timings->bt.height &&
+				(timings->bt.hbackporch ||
+				 timings->bt.hfrontporch ||
+				 timings->bt.hsync) &&
+				timings->bt.vfrontporch &&
+				(timings->bt.vbackporch ||
+				 timings->bt.vsync))) {
+		vpif_dbg(2, debug, "Timings for width, height, "
+				"horizontal back porch, horizontal sync, "
+				"horizontal front porch, vertical back porch, "
+				"vertical sync and vertical front porch "
+				"must be defined\n");
+		return -EINVAL;
+	}
+
+	*bt = timings->bt;
+
+	/* Configure video port timings */
+
+	std_info->eav2sav = bt->hbackporch + bt->hfrontporch +
+		bt->hsync - 8;
+	std_info->sav2eav = bt->width;
+
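+	/*
+	 * l1, l3, l5 (and l7, l9, l11 for interlaced formats) are the VPIF
+	 * vertical line-position parameters; they are derived here from the
+	 * vertical blanking values of the BT timings.
+	 */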
+	std_info->l1 = 1;
+	std_info->l3 = bt->vsync + bt->vbackporch + 1;
+
+	if (bt->interlaced) {
+		if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
+			std_info->vsize = bt->height * 2 +
+				bt->vfrontporch + bt->vsync + bt->vbackporch +
+				bt->il_vfrontporch + bt->il_vsync +
+				bt->il_vbackporch;
+			std_info->l5 = std_info->vsize/2 -
+				(bt->vfrontporch - 1);
+			std_info->l7 = std_info->vsize/2 + 1;
+			std_info->l9 = std_info->l7 + bt->il_vsync +
+				bt->il_vbackporch + 1;
+			std_info->l11 = std_info->vsize -
+				(bt->il_vfrontporch - 1);
+		} else {
+			vpif_dbg(2, debug, "Required timing values for "
+					"interlaced BT format missing\n");
+			return -EINVAL;
+		}
+	} else {
+		std_info->vsize = bt->height + bt->vfrontporch +
+			bt->vsync + bt->vbackporch;
+		std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
+	}
+	strncpy(std_info->name, "Custom timings BT656/1120", VPIF_MAX_NAME);
+	std_info->width = bt->width;
+	std_info->height = bt->height;
+	std_info->frm_fmt = bt->interlaced ? 0 : 1;
+	std_info->ycmux_mode = 0;
+	std_info->capture_format = 0;
+	std_info->vbi_supported = 0;
+	std_info->hd_sd = 1;
+	std_info->stdid = 0;
+	std_info->dv_preset = V4L2_DV_INVALID;
+
+	vid_ch->stdid = 0;
+	vid_ch->dv_preset = V4L2_DV_INVALID;
+	return 0;
+}
+
+/**
+ * vpif_g_dv_timings() - G_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_g_dv_timings(struct file *file, void *priv,
+		struct v4l2_dv_timings *timings)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+	struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+
+	timings->bt = *bt;
+
+	return 0;
+}
+
+/*
+ * vpif_g_chip_ident() - Identify the chip
+ * @file: file ptr
+ * @priv: file handle
+ * @chip: chip identity
+ *
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_g_chip_ident(struct file *file, void *priv,
+		struct v4l2_dbg_chip_ident *chip)
+{
+	chip->ident = V4L2_IDENT_NONE;
+	chip->revision = 0;
+	if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
+			chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) {
+		vpif_dbg(2, debug, "match_type is invalid.\n");
+		return -EINVAL;
+	}
+
+	return v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 0, core,
+			g_chip_ident, chip);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+/*
+ * vpif_dbg_g_register() - Read register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be read
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_dbg_g_register(struct file *file, void *priv,
+		struct v4l2_dbg_register *reg)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core,
+			g_register, reg);
+}
+
+/*
+ * vpif_dbg_s_register() - Write to register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be modified
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the write operation fails.
+ */
+static int vpif_dbg_s_register(struct file *file, void *priv,
+		struct v4l2_dbg_register *reg)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	return v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], core,
+			s_register, reg);
+}
+#endif
+
+/*
+ * vpif_log_status() - Status information
+ * @filep: file ptr
+ * @priv: file handle
+ *
+ * Returns zero.
+ */
+static int vpif_log_status(struct file *filep, void *priv)
+{
+	/* status for sub devices */
+	v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);
+
+	return 0;
+}
+
 /* vpif capture ioctl operations */
 static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
 	.vidioc_querycap        	= vpif_querycap,
@@ -1829,6 +2063,18 @@
 	.vidioc_streamon        	= vpif_streamon,
 	.vidioc_streamoff       	= vpif_streamoff,
 	.vidioc_cropcap         	= vpif_cropcap,
+	.vidioc_enum_dv_presets         = vpif_enum_dv_presets,
+	.vidioc_s_dv_preset             = vpif_s_dv_preset,
+	.vidioc_g_dv_preset             = vpif_g_dv_preset,
+	.vidioc_query_dv_preset         = vpif_query_dv_preset,
+	.vidioc_s_dv_timings            = vpif_s_dv_timings,
+	.vidioc_g_dv_timings            = vpif_g_dv_timings,
+	.vidioc_g_chip_ident		= vpif_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.vidioc_g_register		= vpif_dbg_g_register,
+	.vidioc_s_register		= vpif_dbg_s_register,
+#endif
+	.vidioc_log_status		= vpif_log_status,
 };
 
 /* vpif file operations */
@@ -1836,7 +2082,7 @@
 	.owner = THIS_MODULE,
 	.open = vpif_open,
 	.release = vpif_release,
-	.ioctl = video_ioctl2,
+	.unlocked_ioctl = video_ioctl2,
 	.mmap = vpif_mmap,
 	.poll = vpif_poll
 };
@@ -1979,6 +2225,7 @@
 		common = &(ch->common[VPIF_VIDEO_INDEX]);
 		spin_lock_init(&common->irqlock);
 		mutex_init(&common->lock);
+		ch->video_dev->lock = &common->lock;
 		/* Initialize prio member of channel object */
 		v4l2_prio_init(&ch->prio);
 		err = video_register_device(ch->video_dev,
@@ -2026,9 +2273,9 @@
 		if (vpif_obj.sd[i])
 			vpif_obj.sd[i]->grp_id = 1 << i;
 	}
-	v4l2_info(&vpif_obj.v4l2_dev, "DM646x VPIF Capture driver"
-		  " initialized\n");
 
+	v4l2_info(&vpif_obj.v4l2_dev,
+			"DM646x VPIF capture driver initialized\n");
 	return 0;
 
 probe_subdev_out:
diff --git a/drivers/media/video/davinci/vpif_capture.h b/drivers/media/video/davinci/vpif_capture.h
index 4e12ec8..7a4196d 100644
--- a/drivers/media/video/davinci/vpif_capture.h
+++ b/drivers/media/video/davinci/vpif_capture.h
@@ -59,6 +59,8 @@
 	enum v4l2_field buf_field;
 	/* Currently selected or default standard */
 	v4l2_std_id stdid;
+	u32 dv_preset;
+	struct v4l2_bt_timings bt_timings;
 	/* This is to track the last input that is passed to application */
 	u32 input_idx;
 };
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index 412c65d..cdf659a 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -38,6 +38,7 @@
 #include <media/adv7343.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
+#include <media/v4l2-chip-ident.h>
 
 #include <mach/dm646x.h>
 
@@ -84,17 +85,6 @@
 static struct vpif_device vpif_obj = { {NULL} };
 static struct device *vpif_dev;
 
-static const struct vpif_channel_config_params ch_params[] = {
-	{
-		"NTSC", 720, 480, 30, 0, 1, 268, 1440, 1, 23, 263, 266,
-		286, 525, 525, 0, 1, 0, V4L2_STD_525_60,
-	},
-	{
-		"PAL", 720, 576, 25, 0, 1, 280, 1440, 1, 23, 311, 313,
-		336, 624, 625, 0, 1, 0, V4L2_STD_625_50,
-	},
-};
-
 /*
  * vpif_uservirt_to_phys: This function is used to convert user
  * space virtual address to physical address.
@@ -373,30 +363,54 @@
 	return IRQ_HANDLED;
 }
 
-static int vpif_get_std_info(struct channel_obj *ch)
+static int vpif_update_std_info(struct channel_obj *ch)
 {
-	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
 	struct video_obj *vid_ch = &ch->video;
 	struct vpif_params *vpifparams = &ch->vpifparams;
 	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
 	const struct vpif_channel_config_params *config;
 
-	int index;
+	int i;
 
-	std_info->stdid = vid_ch->stdid;
-	if (!std_info->stdid)
-		return -1;
-
-	for (index = 0; index < ARRAY_SIZE(ch_params); index++) {
-		config = &ch_params[index];
-		if (config->stdid & std_info->stdid) {
-			memcpy(std_info, config, sizeof(*config));
-			break;
+	for (i = 0; i < vpif_ch_params_count; i++) {
+		config = &ch_params[i];
+		if (config->hd_sd == 0) {
+			vpif_dbg(2, debug, "SD format\n");
+			if (config->stdid & vid_ch->stdid) {
+				memcpy(std_info, config, sizeof(*config));
+				break;
+			}
+		} else {
+			vpif_dbg(2, debug, "HD format\n");
+			if (config->dv_preset == vid_ch->dv_preset) {
+				memcpy(std_info, config, sizeof(*config));
+				break;
+			}
 		}
 	}
 
-	if (index == ARRAY_SIZE(ch_params))
-		return -1;
+	if (i == vpif_ch_params_count) {
+		vpif_dbg(1, debug, "Format not found\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int vpif_update_resolution(struct channel_obj *ch)
+{
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct video_obj *vid_ch = &ch->video;
+	struct vpif_params *vpifparams = &ch->vpifparams;
+	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+
+	if (!vid_ch->stdid && !vid_ch->dv_preset && !vid_ch->bt_timings.height)
+		return -EINVAL;
+
+	if (vid_ch->stdid || vid_ch->dv_preset) {
+		if (vpif_update_std_info(ch))
+			return -EINVAL;
+	}
 
 	common->fmt.fmt.pix.width = std_info->width;
 	common->fmt.fmt.pix.height = std_info->height;
@@ -404,8 +418,8 @@
 			common->fmt.fmt.pix.width, common->fmt.fmt.pix.height);
 
 	/* Set height and width paramateres */
-	ch->common[VPIF_VIDEO_INDEX].height = std_info->height;
-	ch->common[VPIF_VIDEO_INDEX].width = std_info->width;
+	common->height = std_info->height;
+	common->width = std_info->width;
 
 	return 0;
 }
@@ -516,10 +530,8 @@
 	else
 		sizeimage = config_params.channel_bufsize[ch->channel_id];
 
-	if (vpif_get_std_info(ch)) {
-		vpif_err("Error getting the standard info\n");
+	if (vpif_update_resolution(ch))
 		return -EINVAL;
-	}
 
 	hpitch = pixfmt->bytesperline;
 	vpitch = sizeimage / (hpitch * 2);
@@ -568,7 +580,10 @@
 static int vpif_mmap(struct file *filep, struct vm_area_struct *vma)
 {
 	struct vpif_fh *fh = filep->private_data;
-	struct common_obj *common = &fh->channel->common[VPIF_VIDEO_INDEX];
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]);
+
+	vpif_dbg(2, debug, "vpif_mmap\n");
 
 	return videobuf_mmap_mapper(&common->buffer_queue, vma);
 }
@@ -637,9 +652,6 @@
 	struct channel_obj *ch = fh->channel;
 	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	/* if this instance is doing IO */
 	if (fh->io_allowed[VPIF_VIDEO_INDEX]) {
 		/* Reset io_usrs member of channel object */
@@ -662,8 +674,6 @@
 		    config_params.numbuffers[ch->channel_id];
 	}
 
-	mutex_unlock(&common->lock);
-
 	/* Decrement channel usrs counter */
 	atomic_dec(&ch->usrs);
 	/* If this file handle has initialize encoder device, reset it */
@@ -680,7 +690,12 @@
 }
 
 /* functions implementing ioctls */
-
+/**
+ * vpif_querycap() - QUERYCAP handler
+ * @file: file ptr
+ * @priv: file handle
+ * @cap: ptr to v4l2_capability structure
+ */
 static int vpif_querycap(struct file *file, void  *priv,
 				struct v4l2_capability *cap)
 {
@@ -722,17 +737,9 @@
 	if (common->fmt.type != fmt->type)
 		return -EINVAL;
 
-	/* Fill in the information about format */
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
-	if (vpif_get_std_info(ch)) {
-		vpif_err("Error getting the standard info\n");
+	if (vpif_update_resolution(ch))
 		return -EINVAL;
-	}
-
 	*fmt = common->fmt;
-	mutex_unlock(&common->lock);
 	return 0;
 }
 
@@ -773,12 +780,7 @@
 	/* store the pix format in the channel object */
 	common->fmt.fmt.pix = *pixfmt;
 	/* store the format in the channel object */
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	common->fmt = *fmt;
-	mutex_unlock(&common->lock);
-
 	return 0;
 }
 
@@ -808,7 +810,6 @@
 	struct common_obj *common;
 	enum v4l2_field field;
 	u8 index = 0;
-	int ret = 0;
 
 	/* This file handle has not initialized the channel,
 	   It is not allowed to do settings */
@@ -826,18 +827,12 @@
 	index = VPIF_VIDEO_INDEX;
 
 	common = &ch->common[index];
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
 
-	if (common->fmt.type != reqbuf->type) {
-		ret = -EINVAL;
-		goto reqbuf_exit;
-	}
+	if (common->fmt.type != reqbuf->type)
+		return -EINVAL;
 
-	if (0 != common->io_usrs) {
-		ret = -EBUSY;
-		goto reqbuf_exit;
-	}
+	if (0 != common->io_usrs)
+		return -EBUSY;
 
 	if (reqbuf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
 		if (common->fmt.fmt.pix.field == V4L2_FIELD_ANY)
@@ -854,7 +849,7 @@
 					    &common->irqlock,
 					    reqbuf->type, field,
 					    sizeof(struct videobuf_buffer), fh,
-					    NULL);
+					    &common->lock);
 
 	/* Set io allowed member of file handle to TRUE */
 	fh->io_allowed[index] = 1;
@@ -865,11 +860,7 @@
 	INIT_LIST_HEAD(&common->dma_queue);
 
 	/* Allocate buffers */
-	ret = videobuf_reqbufs(&common->buffer_queue, reqbuf);
-
-reqbuf_exit:
-	mutex_unlock(&common->lock);
-	return ret;
+	return videobuf_reqbufs(&common->buffer_queue, reqbuf);
 }
 
 static int vpif_querybuf(struct file *file, void *priv,
@@ -990,22 +981,19 @@
 	}
 
 	/* Call encoder subdevice function to set the standard */
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	ch->video.stdid = *std_id;
+	ch->video.dv_preset = V4L2_DV_INVALID;
+	memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
+
 	/* Get the information about the standard */
-	if (vpif_get_std_info(ch)) {
-		vpif_err("Error getting the standard info\n");
+	if (vpif_update_resolution(ch))
 		return -EINVAL;
-	}
 
 	if ((ch->vpifparams.std_info.width *
 		ch->vpifparams.std_info.height * 2) >
 		config_params.channel_bufsize[ch->channel_id]) {
 		vpif_err("invalid std for this size\n");
-		ret = -EINVAL;
-		goto s_std_exit;
+		return -EINVAL;
 	}
 
 	common->fmt.fmt.pix.bytesperline = common->fmt.fmt.pix.width;
@@ -1016,16 +1004,13 @@
 						s_std_output, *std_id);
 	if (ret < 0) {
 		vpif_err("Failed to set output standard\n");
-		goto s_std_exit;
+		return ret;
 	}
 
 	ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, core,
 							s_std, *std_id);
 	if (ret < 0)
 		vpif_err("Failed to set standard for sub devices\n");
-
-s_std_exit:
-	mutex_unlock(&common->lock);
 	return ret;
 }
 
@@ -1090,21 +1075,17 @@
 	if (ret < 0)
 		return ret;
 
-	/* Call videobuf_streamon to start streaming  in videobuf */
+	/* Call videobuf_streamon to start streaming in videobuf */
 	ret = videobuf_streamon(&common->buffer_queue);
 	if (ret < 0) {
 		vpif_err("videobuf_streamon\n");
 		return ret;
 	}
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	/* If buffer queue is empty, return error */
 	if (list_empty(&common->dma_queue)) {
 		vpif_err("buffer queue is empty\n");
-		ret = -EIO;
-		goto streamon_exit;
+		return -EIO;
 	}
 
 	/* Get the next frame from the buffer queue */
@@ -1130,8 +1111,7 @@
 			|| (!ch->vpifparams.std_info.frm_fmt
 			&& (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) {
 			vpif_err("conflict in field format and std format\n");
-			ret = -EINVAL;
-			goto streamon_exit;
+			return -EINVAL;
 		}
 
 		/* clock settings */
@@ -1140,13 +1120,13 @@
 						ch->vpifparams.std_info.hd_sd);
 		if (ret < 0) {
 			vpif_err("can't set clock\n");
-			goto streamon_exit;
+			return ret;
 		}
 
 		/* set the parameters and addresses */
 		ret = vpif_set_video_params(vpif, ch->channel_id + 2);
 		if (ret < 0)
-			goto streamon_exit;
+			return ret;
 
 		common->started = ret;
 		vpif_config_addr(ch, ret);
@@ -1171,9 +1151,6 @@
 		}
 		channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
 	}
-
-streamon_exit:
-	mutex_unlock(&common->lock);
 	return ret;
 }
 
@@ -1199,9 +1176,6 @@
 		return -EINVAL;
 	}
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	if (buftype == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
 		/* disable channel */
 		if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
@@ -1216,8 +1190,6 @@
 	}
 
 	common->started = 0;
-	mutex_unlock(&common->lock);
-
 	return videobuf_streamoff(&common->buffer_queue);
 }
 
@@ -1264,13 +1236,9 @@
 	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
 	int ret = 0;
 
-	if (mutex_lock_interruptible(&common->lock))
-		return -ERESTARTSYS;
-
 	if (common->started) {
 		vpif_err("Streaming in progress\n");
-		ret = -EBUSY;
-		goto s_output_exit;
+		return -EBUSY;
 	}
 
 	ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video,
@@ -1280,9 +1248,6 @@
 		vpif_err("Failed to set output standard\n");
 
 	vid_ch->output_id = i;
-
-s_output_exit:
-	mutex_unlock(&common->lock);
 	return ret;
 }
 
@@ -1315,6 +1280,287 @@
 	return v4l2_prio_change(&ch->prio, &fh->prio, p);
 }
 
+/**
+ * vpif_enum_dv_presets() - ENUM_DV_PRESETS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_enum_dv_presets(struct file *file, void *priv,
+		struct v4l2_dv_enum_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+
+	return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
+			video, enum_dv_presets, preset);
+}
+
+/**
+ * vpif_s_dv_preset() - S_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_s_dv_preset(struct file *file, void *priv,
+		struct v4l2_dv_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
+	struct video_obj *vid_ch = &ch->video;
+	int ret = 0;
+
+	if (common->started) {
+		vpif_dbg(1, debug, "streaming in progress\n");
+		return -EBUSY;
+	}
+
+	ret = v4l2_prio_check(&ch->prio, fh->prio);
+	if (ret != 0)
+		return ret;
+
+	fh->initialized = 1;
+
+	/* Call encoder subdevice function to set the standard */
+	if (mutex_lock_interruptible(&common->lock))
+		return -ERESTARTSYS;
+
+	ch->video.dv_preset = preset->preset;
+	ch->video.stdid = V4L2_STD_UNKNOWN;
+	memset(&ch->video.bt_timings, 0, sizeof(ch->video.bt_timings));
+
+	/* Get the information about the standard */
+	if (vpif_update_resolution(ch)) {
+		ret = -EINVAL;
+	} else {
+		/* Configure the default format information */
+		vpif_config_format(ch);
+
+		ret = v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
+				video, s_dv_preset, preset);
+	}
+
+	mutex_unlock(&common->lock);
+
+	return ret;
+}
+
+/**
+ * vpif_g_dv_preset() - G_DV_PRESET handler
+ * @file: file ptr
+ * @priv: file handle
+ * @preset: input preset
+ */
+static int vpif_g_dv_preset(struct file *file, void *priv,
+		struct v4l2_dv_preset *preset)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+
+	preset->preset = ch->video.dv_preset;
+
+	return 0;
+}
+
+/**
+ * vpif_s_dv_timings() - S_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_s_dv_timings(struct file *file, void *priv,
+		struct v4l2_dv_timings *timings)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct vpif_params *vpifparams = &ch->vpifparams;
+	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
+	struct video_obj *vid_ch = &ch->video;
+	struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+	int ret;
+
+	if (timings->type != V4L2_DV_BT_656_1120) {
+		vpif_dbg(2, debug, "Timing type not defined\n");
+		return -EINVAL;
+	}
+
+	/* Configure subdevice timings, if any */
+	ret = v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id],
+			video, s_dv_timings, timings);
+	if (ret == -ENOIOCTLCMD) {
+		vpif_dbg(2, debug, "Custom DV timings not supported by "
+				"subdevice\n");
+		return -EINVAL;
+	}
+	if (ret < 0) {
+		vpif_dbg(2, debug, "Error setting custom DV timings\n");
+		return ret;
+	}
+
+	if (!(timings->bt.width && timings->bt.height &&
+				(timings->bt.hbackporch ||
+				 timings->bt.hfrontporch ||
+				 timings->bt.hsync) &&
+				timings->bt.vfrontporch &&
+				(timings->bt.vbackporch ||
+				 timings->bt.vsync))) {
+		vpif_dbg(2, debug, "Timings for width, height, "
+				"horizontal back porch, horizontal sync, "
+				"horizontal front porch, vertical back porch, "
+				"vertical sync and vertical front porch "
+				"must be defined\n");
+		return -EINVAL;
+	}
+
+	*bt = timings->bt;
+
+	/* Configure video port timings */
+
+	std_info->eav2sav = bt->hbackporch + bt->hfrontporch +
+		bt->hsync - 8;
+	std_info->sav2eav = bt->width;
+
+	std_info->l1 = 1;
+	std_info->l3 = bt->vsync + bt->vbackporch + 1;
+
+	if (bt->interlaced) {
+		if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
+			std_info->vsize = bt->height * 2 +
+				bt->vfrontporch + bt->vsync + bt->vbackporch +
+				bt->il_vfrontporch + bt->il_vsync +
+				bt->il_vbackporch;
+			std_info->l5 = std_info->vsize/2 -
+				(bt->vfrontporch - 1);
+			std_info->l7 = std_info->vsize/2 + 1;
+			std_info->l9 = std_info->l7 + bt->il_vsync +
+				bt->il_vbackporch + 1;
+			std_info->l11 = std_info->vsize -
+				(bt->il_vfrontporch - 1);
+		} else {
+			vpif_dbg(2, debug, "Required timing values for "
+					"interlaced BT format missing\n");
+			return -EINVAL;
+		}
+	} else {
+		std_info->vsize = bt->height + bt->vfrontporch +
+			bt->vsync + bt->vbackporch;
+		std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
+	}
+	strncpy(std_info->name, "Custom timings BT656/1120",
+			VPIF_MAX_NAME);
+	std_info->width = bt->width;
+	std_info->height = bt->height;
+	std_info->frm_fmt = bt->interlaced ? 0 : 1;
+	std_info->ycmux_mode = 0;
+	std_info->capture_format = 0;
+	std_info->vbi_supported = 0;
+	std_info->hd_sd = 1;
+	std_info->stdid = 0;
+	std_info->dv_preset = V4L2_DV_INVALID;
+
+	vid_ch->stdid = 0;
+	vid_ch->dv_preset = V4L2_DV_INVALID;
+
+	return 0;
+}
+
+/**
+ * vpif_g_dv_timings() - G_DV_TIMINGS handler
+ * @file: file ptr
+ * @priv: file handle
+ * @timings: digital video timings
+ */
+static int vpif_g_dv_timings(struct file *file, void *priv,
+		struct v4l2_dv_timings *timings)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+	struct v4l2_bt_timings *bt = &vid_ch->bt_timings;
+
+	timings->bt = *bt;
+
+	return 0;
+}
+
+/*
+ * vpif_g_chip_ident() - Identify the chip
+ * @file: file ptr
+ * @priv: file handle
+ * @chip: chip identity
+ *
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_g_chip_ident(struct file *file, void *priv,
+		struct v4l2_dbg_chip_ident *chip)
+{
+	chip->ident = V4L2_IDENT_NONE;
+	chip->revision = 0;
+	if (chip->match.type != V4L2_CHIP_MATCH_I2C_DRIVER &&
+			chip->match.type != V4L2_CHIP_MATCH_I2C_ADDR) {
+		vpif_dbg(2, debug, "match_type is invalid.\n");
+		return -EINVAL;
+	}
+
+	return v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 0, core,
+			g_chip_ident, chip);
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+/*
+ * vpif_dbg_g_register() - Read register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be read
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the read operation fails.
+ */
+static int vpif_dbg_g_register(struct file *file, void *priv,
+		struct v4l2_dbg_register *reg)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+
+	return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id], core,
+			g_register, reg);
+}
+
+/*
+ * vpif_dbg_s_register() - Write to register
+ * @file: file ptr
+ * @priv: file handle
+ * @reg: register to be modified
+ *
+ * Debugging only
+ * Returns zero or -EINVAL if the write operation fails.
+ */
+static int vpif_dbg_s_register(struct file *file, void *priv,
+		struct v4l2_dbg_register *reg)
+{
+	struct vpif_fh *fh = priv;
+	struct channel_obj *ch = fh->channel;
+	struct video_obj *vid_ch = &ch->video;
+
+	return v4l2_subdev_call(vpif_obj.sd[vid_ch->output_id], core,
+			s_register, reg);
+}
+#endif
+
+/*
+ * vpif_log_status() - Status information
+ * @file: file ptr
+ * @priv: file handle
+ *
+ * Returns zero.
+ */
+static int vpif_log_status(struct file *filep, void *priv)
+{
+	/* status for sub devices */
+	v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);
+
+	return 0;
+}
+
 /* vpif display ioctl operations */
 static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
 	.vidioc_querycap        	= vpif_querycap,
@@ -1336,13 +1582,24 @@
 	.vidioc_s_output		= vpif_s_output,
 	.vidioc_g_output		= vpif_g_output,
 	.vidioc_cropcap         	= vpif_cropcap,
+	.vidioc_enum_dv_presets         = vpif_enum_dv_presets,
+	.vidioc_s_dv_preset             = vpif_s_dv_preset,
+	.vidioc_g_dv_preset             = vpif_g_dv_preset,
+	.vidioc_s_dv_timings            = vpif_s_dv_timings,
+	.vidioc_g_dv_timings            = vpif_g_dv_timings,
+	.vidioc_g_chip_ident		= vpif_g_chip_ident,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+	.vidioc_g_register		= vpif_dbg_g_register,
+	.vidioc_s_register		= vpif_dbg_s_register,
+#endif
+	.vidioc_log_status		= vpif_log_status,
 };
 
 static const struct v4l2_file_operations vpif_fops = {
 	.owner		= THIS_MODULE,
 	.open		= vpif_open,
 	.release	= vpif_release,
-	.ioctl		= video_ioctl2,
+	.unlocked_ioctl	= video_ioctl2,
 	.mmap		= vpif_mmap,
 	.poll		= vpif_poll
 };
@@ -1526,6 +1783,7 @@
 		v4l2_prio_init(&ch->prio);
 		ch->common[VPIF_VIDEO_INDEX].fmt.type =
 						V4L2_BUF_TYPE_VIDEO_OUTPUT;
+		ch->video_dev->lock = &common->lock;
 
 		/* register video device */
 		vpif_dbg(1, debug, "channel=%x,channel->video_dev=%x\n",
@@ -1565,6 +1823,8 @@
 			vpif_obj.sd[i]->grp_id = 1 << i;
 	}
 
+	v4l2_info(&vpif_obj.v4l2_dev,
+			"DM646x VPIF display driver initialized\n");
 	return 0;
 
 probe_subdev_out:
diff --git a/drivers/media/video/davinci/vpif_display.h b/drivers/media/video/davinci/vpif_display.h
index a2a7cd1..b53aaa8 100644
--- a/drivers/media/video/davinci/vpif_display.h
+++ b/drivers/media/video/davinci/vpif_display.h
@@ -67,6 +67,8 @@
 					 * most recent displayed frame only */
 	v4l2_std_id stdid;		/* Currently selected or default
 					 * standard */
+	u32 dv_preset;
+	struct v4l2_bt_timings bt_timings;
 	u32 output_id;			/* Current output id */
 };
 
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 099d5df..87f77a3 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -33,6 +33,7 @@
 #include <media/saa7115.h>
 #include <media/tvp5150.h>
 #include <media/tvaudio.h>
+#include <media/mt9v011.h>
 #include <media/i2c-addr.h>
 #include <media/tveeprom.h>
 #include <media/v4l2-common.h>
@@ -1917,11 +1918,6 @@
 	I2C_CLIENT_END
 };
 
-static unsigned short mt9v011_addrs[] = {
-	0xba >> 1,
-	I2C_CLIENT_END
-};
-
 static unsigned short msp3400_addrs[] = {
 	0x80 >> 1,
 	0x88 >> 1,
@@ -2437,6 +2433,7 @@
 		dev->init_data.ir_codes = RC_MAP_RC5_HAUPPAUGE_NEW;
 		dev->init_data.get_key = em28xx_get_key_em_haup;
 		dev->init_data.name = "i2c IR (EM2840 Hauppauge)";
+		break;
 	case EM2820_BOARD_LEADTEK_WINFAST_USBII_DELUXE:
 		dev->init_data.ir_codes = RC_MAP_WINFAST_USBII_DELUXE;
 		dev->init_data.get_key = em28xx_get_key_winfast_usbii_deluxe;
@@ -2623,11 +2620,17 @@
 			"tvp5150", 0, tvp5150_addrs);
 
 	if (dev->em28xx_sensor == EM28XX_MT9V011) {
+		struct mt9v011_platform_data pdata;
+		struct i2c_board_info mt9v011_info = {
+			.type = "mt9v011",
+			.addr = 0xba >> 1,
+			.platform_data = &pdata,
+		};
 		struct v4l2_subdev *sd;
 
-		sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
-			 &dev->i2c_adap, "mt9v011", 0, mt9v011_addrs);
-		v4l2_subdev_call(sd, core, s_config, 0, &dev->sensor_xtal);
+		pdata.xtal = dev->sensor_xtal;
+		sd = v4l2_i2c_new_subdev_board(&dev->v4l2_dev, &dev->i2c_adap,
+				&mt9v011_info, NULL);
 	}
 
 
diff --git a/drivers/media/video/et61x251/et61x251.h b/drivers/media/video/et61x251/et61x251.h
index cc77d14..bf66189 100644
--- a/drivers/media/video/et61x251/et61x251.h
+++ b/drivers/media/video/et61x251/et61x251.h
@@ -59,31 +59,7 @@
 /*****************************************************************************/
 
 static const struct usb_device_id et61x251_id_table[] = {
-	{ USB_DEVICE(0x102c, 0x6151), },
 	{ USB_DEVICE(0x102c, 0x6251), },
-	{ USB_DEVICE(0x102c, 0x6253), },
-	{ USB_DEVICE(0x102c, 0x6254), },
-	{ USB_DEVICE(0x102c, 0x6255), },
-	{ USB_DEVICE(0x102c, 0x6256), },
-	{ USB_DEVICE(0x102c, 0x6257), },
-	{ USB_DEVICE(0x102c, 0x6258), },
-	{ USB_DEVICE(0x102c, 0x6259), },
-	{ USB_DEVICE(0x102c, 0x625a), },
-	{ USB_DEVICE(0x102c, 0x625b), },
-	{ USB_DEVICE(0x102c, 0x625c), },
-	{ USB_DEVICE(0x102c, 0x625d), },
-	{ USB_DEVICE(0x102c, 0x625e), },
-	{ USB_DEVICE(0x102c, 0x625f), },
-	{ USB_DEVICE(0x102c, 0x6260), },
-	{ USB_DEVICE(0x102c, 0x6261), },
-	{ USB_DEVICE(0x102c, 0x6262), },
-	{ USB_DEVICE(0x102c, 0x6263), },
-	{ USB_DEVICE(0x102c, 0x6264), },
-	{ USB_DEVICE(0x102c, 0x6265), },
-	{ USB_DEVICE(0x102c, 0x6266), },
-	{ USB_DEVICE(0x102c, 0x6267), },
-	{ USB_DEVICE(0x102c, 0x6268), },
-	{ USB_DEVICE(0x102c, 0x6269), },
 	{ }
 };
 
diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c
index 6290439..a09c470 100644
--- a/drivers/media/video/gspca/benq.c
+++ b/drivers/media/video/gspca/benq.c
@@ -276,7 +276,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x04a5, 0x3035)},
 	{}
 };
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index 1eacb6c..8b39849 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -1040,14 +1040,14 @@
 };
 
 /* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0572, 0x0041)},
 	{}
 };
 MODULE_DEVICE_TABLE(usb, device_table);
 
 /* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
 			const struct usb_device_id *id)
 {
 	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/cpia1.c b/drivers/media/video/gspca/cpia1.c
index c1ae05f..4bf2cab 100644
--- a/drivers/media/video/gspca/cpia1.c
+++ b/drivers/media/video/gspca/cpia1.c
@@ -2088,7 +2088,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0553, 0x0002)},
 	{USB_DEVICE(0x0813, 0x0001)},
 	{}
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index a594b36..4b2c483 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -864,7 +864,7 @@
 };
 
 /* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x102c, 0x6151), .driver_info = SENSOR_PAS106},
 #if !defined CONFIG_USB_ET61X251 && !defined CONFIG_USB_ET61X251_MODULE
 	{USB_DEVICE(0x102c, 0x6251), .driver_info = SENSOR_TAS5130CXX},
@@ -875,7 +875,7 @@
 MODULE_DEVICE_TABLE(usb, device_table);
 
 /* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
 		    const struct usb_device_id *id)
 {
 	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index d782264..987b4b6 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -229,7 +229,7 @@
 }
 
 /* Table of supported USB devices */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x04cb, 0x0104)},
 	{USB_DEVICE(0x04cb, 0x0109)},
 	{USB_DEVICE(0x04cb, 0x010b)},
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index b05bec7..9908303 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -488,7 +488,7 @@
 
 /*=================== USB driver structure initialisation ==================*/
 
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x05e3, 0x0503)},
 	{USB_DEVICE(0x05e3, 0xf191)},
 	{}
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 4429700..f21f2a2 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -55,7 +55,7 @@
 MODULE_DESCRIPTION("GSPCA USB Camera Driver");
 MODULE_LICENSE("GPL");
 
-#define DRIVER_VERSION_NUMBER	KERNEL_VERSION(2, 11, 0)
+#define DRIVER_VERSION_NUMBER	KERNEL_VERSION(2, 12, 0)
 
 #ifdef GSPCA_DEBUG
 int gspca_debug = D_ERR | D_PROBE;
@@ -508,8 +508,8 @@
 	return 0;
 }
 
-static int frame_alloc(struct gspca_dev *gspca_dev,
-			unsigned int count)
+static int frame_alloc(struct gspca_dev *gspca_dev, struct file *file,
+			enum v4l2_memory memory, unsigned int count)
 {
 	struct gspca_frame *frame;
 	unsigned int frsz;
@@ -519,7 +519,6 @@
 	frsz = gspca_dev->cam.cam_mode[i].sizeimage;
 	PDEBUG(D_STREAM, "frame alloc frsz: %d", frsz);
 	frsz = PAGE_ALIGN(frsz);
-	gspca_dev->frsz = frsz;
 	if (count >= GSPCA_MAX_FRAMES)
 		count = GSPCA_MAX_FRAMES - 1;
 	gspca_dev->frbuf = vmalloc_32(frsz * count);
@@ -527,6 +526,9 @@
 		err("frame alloc failed");
 		return -ENOMEM;
 	}
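+	/* record the owning file and memory type only on successful allocation */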
+	gspca_dev->capt_file = file;
+	gspca_dev->memory = memory;
+	gspca_dev->frsz = frsz;
 	gspca_dev->nframes = count;
 	for (i = 0; i < count; i++) {
 		frame = &gspca_dev->frame[i];
@@ -535,7 +537,7 @@
 		frame->v4l2_buf.flags = 0;
 		frame->v4l2_buf.field = V4L2_FIELD_NONE;
 		frame->v4l2_buf.length = frsz;
-		frame->v4l2_buf.memory = gspca_dev->memory;
+		frame->v4l2_buf.memory = memory;
 		frame->v4l2_buf.sequence = 0;
 		frame->data = gspca_dev->frbuf + i * frsz;
 		frame->v4l2_buf.m.offset = i * frsz;
@@ -558,6 +560,9 @@
 			gspca_dev->frame[i].data = NULL;
 	}
 	gspca_dev->nframes = 0;
+	gspca_dev->frsz = 0;
+	gspca_dev->capt_file = NULL;
+	gspca_dev->memory = GSPCA_MEMORY_NO;
 }
 
 static void destroy_urbs(struct gspca_dev *gspca_dev)
@@ -1210,29 +1215,15 @@
 static int dev_open(struct file *file)
 {
 	struct gspca_dev *gspca_dev;
-	int ret;
 
 	PDEBUG(D_STREAM, "[%s] open", current->comm);
 	gspca_dev = (struct gspca_dev *) video_devdata(file);
-	if (mutex_lock_interruptible(&gspca_dev->queue_lock))
-		return -ERESTARTSYS;
-	if (!gspca_dev->present) {
-		ret = -ENODEV;
-		goto out;
-	}
-
-	if (gspca_dev->users > 4) {	/* (arbitrary value) */
-		ret = -EBUSY;
-		goto out;
-	}
+	if (!gspca_dev->present)
+		return -ENODEV;
 
 	/* protect the subdriver against rmmod */
-	if (!try_module_get(gspca_dev->module)) {
-		ret = -ENODEV;
-		goto out;
-	}
-
-	gspca_dev->users++;
+	if (!try_module_get(gspca_dev->module))
+		return -ENODEV;
 
 	file->private_data = gspca_dev;
 #ifdef GSPCA_DEBUG
@@ -1244,14 +1235,7 @@
 		gspca_dev->vdev.debug &= ~(V4L2_DEBUG_IOCTL
 					| V4L2_DEBUG_IOCTL_ARG);
 #endif
-	ret = 0;
-out:
-	mutex_unlock(&gspca_dev->queue_lock);
-	if (ret != 0)
-		PDEBUG(D_ERR|D_STREAM, "open failed err %d", ret);
-	else
-		PDEBUG(D_STREAM, "open done");
-	return ret;
+	return 0;
 }
 
 static int dev_close(struct file *file)
@@ -1261,7 +1245,6 @@
 	PDEBUG(D_STREAM, "[%s] close", current->comm);
 	if (mutex_lock_interruptible(&gspca_dev->queue_lock))
 		return -ERESTARTSYS;
-	gspca_dev->users--;
 
 	/* if the file did the capture, free the streaming resources */
 	if (gspca_dev->capt_file == file) {
@@ -1272,8 +1255,6 @@
 			mutex_unlock(&gspca_dev->usb_lock);
 		}
 		frame_free(gspca_dev);
-		gspca_dev->capt_file = NULL;
-		gspca_dev->memory = GSPCA_MEMORY_NO;
 	}
 	file->private_data = NULL;
 	module_put(gspca_dev->module);
@@ -1516,6 +1497,7 @@
 		return -ERESTARTSYS;
 
 	if (gspca_dev->memory != GSPCA_MEMORY_NO
+	    && gspca_dev->memory != GSPCA_MEMORY_READ
 	    && gspca_dev->memory != rb->memory) {
 		ret = -EBUSY;
 		goto out;
@@ -1544,19 +1526,18 @@
 		gspca_stream_off(gspca_dev);
 		mutex_unlock(&gspca_dev->usb_lock);
 	}
+	/* Don't restart the stream when switching from read to mmap mode */
+	if (gspca_dev->memory == GSPCA_MEMORY_READ)
+		streaming = 0;
 
 	/* free the previous allocated buffers, if any */
-	if (gspca_dev->nframes != 0) {
+	if (gspca_dev->nframes != 0)
 		frame_free(gspca_dev);
-		gspca_dev->capt_file = NULL;
-	}
 	if (rb->count == 0)			/* unrequest */
 		goto out;
-	gspca_dev->memory = rb->memory;
-	ret = frame_alloc(gspca_dev, rb->count);
+	ret = frame_alloc(gspca_dev, file, rb->memory, rb->count);
 	if (ret == 0) {
 		rb->count = gspca_dev->nframes;
-		gspca_dev->capt_file = file;
 		if (streaming)
 			ret = gspca_init_transfer(gspca_dev);
 	}
@@ -1630,11 +1611,15 @@
 
 	if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
 		return -EINVAL;
-	if (!gspca_dev->streaming)
-		return 0;
+
 	if (mutex_lock_interruptible(&gspca_dev->queue_lock))
 		return -ERESTARTSYS;
 
+	if (!gspca_dev->streaming) {
+		ret = 0;
+		goto out;
+	}
+
 	/* check the capture file */
 	if (gspca_dev->capt_file != file) {
 		ret = -EBUSY;
@@ -1649,6 +1634,8 @@
 	gspca_dev->usb_err = 0;
 	gspca_stream_off(gspca_dev);
 	mutex_unlock(&gspca_dev->usb_lock);
+	/* In case another thread is waiting in dqbuf */
+	wake_up_interruptible(&gspca_dev->wq);
 
 	/* empty the transfer queues */
 	atomic_set(&gspca_dev->fr_q, 0);
@@ -1827,43 +1814,29 @@
 	return ret;
 }
 
-/*
- * wait for a video frame
- *
- * If a frame is ready, its index is returned.
- */
-static int frame_wait(struct gspca_dev *gspca_dev,
-			int nonblock_ing)
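+/*
+ * Check whether a frame is ready for the given capture file and memory type.
+ * Returns a negative error code, 0 if no frame is ready, or a positive value
+ * if one is.  Must be called with gspca_dev->queue_lock held.
+ */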
+static int frame_ready_nolock(struct gspca_dev *gspca_dev, struct file *file,
+				enum v4l2_memory memory)
 {
-	int i, ret;
+	if (!gspca_dev->present)
+		return -ENODEV;
+	if (gspca_dev->capt_file != file || gspca_dev->memory != memory ||
+			!gspca_dev->streaming)
+		return -EINVAL;
 
 	/* check if a frame is ready */
-	i = gspca_dev->fr_o;
-	if (i == atomic_read(&gspca_dev->fr_i)) {
-		if (nonblock_ing)
-			return -EAGAIN;
+	return gspca_dev->fr_o != atomic_read(&gspca_dev->fr_i);
+}
 
-		/* wait till a frame is ready */
-		ret = wait_event_interruptible_timeout(gspca_dev->wq,
-			i != atomic_read(&gspca_dev->fr_i) ||
-			!gspca_dev->streaming || !gspca_dev->present,
-			msecs_to_jiffies(3000));
-		if (ret < 0)
-			return ret;
-		if (ret == 0 || !gspca_dev->streaming || !gspca_dev->present)
-			return -EIO;
-	}
+static int frame_ready(struct gspca_dev *gspca_dev, struct file *file,
+			enum v4l2_memory memory)
+{
+	int ret;
 
-	gspca_dev->fr_o = (i + 1) % GSPCA_MAX_FRAMES;
-
-	if (gspca_dev->sd_desc->dq_callback) {
-		mutex_lock(&gspca_dev->usb_lock);
-		gspca_dev->usb_err = 0;
-		if (gspca_dev->present)
-			gspca_dev->sd_desc->dq_callback(gspca_dev);
-		mutex_unlock(&gspca_dev->usb_lock);
-	}
-	return gspca_dev->fr_queue[i];
+	if (mutex_lock_interruptible(&gspca_dev->queue_lock))
+		return -ERESTARTSYS;
+	ret = frame_ready_nolock(gspca_dev, file, memory);
+	mutex_unlock(&gspca_dev->queue_lock);
+	return ret;
 }
 
 /*
@@ -1876,33 +1849,57 @@
 {
 	struct gspca_dev *gspca_dev = priv;
 	struct gspca_frame *frame;
-	int i, ret;
+	int i, j, ret;
 
 	PDEBUG(D_FRAM, "dqbuf");
-	if (v4l2_buf->memory != gspca_dev->memory)
-		return -EINVAL;
 
-	if (!gspca_dev->present)
-		return -ENODEV;
-
-	/* if not streaming, be sure the application will not loop forever */
-	if (!(file->f_flags & O_NONBLOCK)
-	    && !gspca_dev->streaming && gspca_dev->users == 1)
-		return -EINVAL;
-
-	/* only the capturing file may dequeue */
-	if (gspca_dev->capt_file != file)
-		return -EINVAL;
-
-	/* only one dequeue / read at a time */
-	if (mutex_lock_interruptible(&gspca_dev->read_lock))
+	if (mutex_lock_interruptible(&gspca_dev->queue_lock))
 		return -ERESTARTSYS;
 
-	ret = frame_wait(gspca_dev, file->f_flags & O_NONBLOCK);
-	if (ret < 0)
-		goto out;
-	i = ret;				/* frame index */
-	frame = &gspca_dev->frame[i];
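+	/* wait for a frame, dropping queue_lock while sleeping; in
+	 * non-blocking mode return -EAGAIN immediately instead */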
+	for (;;) {
+		ret = frame_ready_nolock(gspca_dev, file, v4l2_buf->memory);
+		if (ret < 0)
+			goto out;
+		if (ret > 0)
+			break;
+
+		mutex_unlock(&gspca_dev->queue_lock);
+
+		if (file->f_flags & O_NONBLOCK)
+			return -EAGAIN;
+
+		/* wait till a frame is ready */
+		ret = wait_event_interruptible_timeout(gspca_dev->wq,
+			frame_ready(gspca_dev, file, v4l2_buf->memory),
+			msecs_to_jiffies(3000));
+		if (ret < 0)
+			return ret;
+		if (ret == 0)
+			return -EIO;
+
+		if (mutex_lock_interruptible(&gspca_dev->queue_lock))
+			return -ERESTARTSYS;
+	}
+
+	i = gspca_dev->fr_o;
+	j = gspca_dev->fr_queue[i];
+	frame = &gspca_dev->frame[j];
+
+	gspca_dev->fr_o = (i + 1) % GSPCA_MAX_FRAMES;
+
+	if (gspca_dev->sd_desc->dq_callback) {
+		mutex_lock(&gspca_dev->usb_lock);
+		gspca_dev->usb_err = 0;
+		if (gspca_dev->present)
+			gspca_dev->sd_desc->dq_callback(gspca_dev);
+		mutex_unlock(&gspca_dev->usb_lock);
+	}
+
+	frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_DONE;
+	memcpy(v4l2_buf, &frame->v4l2_buf, sizeof *v4l2_buf);
+	PDEBUG(D_FRAM, "dqbuf %d", j);
+	ret = 0;
+
 	if (gspca_dev->memory == V4L2_MEMORY_USERPTR) {
 		if (copy_to_user((__u8 __user *) frame->v4l2_buf.m.userptr,
 				 frame->data,
@@ -1910,15 +1907,10 @@
 			PDEBUG(D_ERR|D_STREAM,
 				"dqbuf cp to user failed");
 			ret = -EFAULT;
-			goto out;
 		}
 	}
-	frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_DONE;
-	memcpy(v4l2_buf, &frame->v4l2_buf, sizeof *v4l2_buf);
-	PDEBUG(D_FRAM, "dqbuf %d", i);
-	ret = 0;
 out:
-	mutex_unlock(&gspca_dev->read_lock);
+	mutex_unlock(&gspca_dev->queue_lock);
 	return ret;
 }
 
@@ -2033,9 +2025,7 @@
 	poll_wait(file, &gspca_dev->wq, wait);
 
 	/* if reqbufs is not done, the user would use read() */
-	if (gspca_dev->nframes == 0) {
-		if (gspca_dev->memory != GSPCA_MEMORY_NO)
-			return POLLERR;		/* not the 1st time */
+	if (gspca_dev->memory == GSPCA_MEMORY_NO) {
 		ret = read_alloc(gspca_dev, file);
 		if (ret != 0)
 			return POLLERR;
@@ -2067,18 +2057,10 @@
 	PDEBUG(D_FRAM, "read (%zd)", count);
 	if (!gspca_dev->present)
 		return -ENODEV;
-	switch (gspca_dev->memory) {
-	case GSPCA_MEMORY_NO:			/* first time */
+	if (gspca_dev->memory == GSPCA_MEMORY_NO) { /* first time ? */
 		ret = read_alloc(gspca_dev, file);
 		if (ret != 0)
 			return ret;
-		break;
-	case GSPCA_MEMORY_READ:
-		if (gspca_dev->capt_file == file)
-			break;
-		/* fall thru */
-	default:
-		return -EINVAL;
 	}
 
 	/* get a frame */
@@ -2266,7 +2248,6 @@
 		goto out;
 
 	mutex_init(&gspca_dev->usb_lock);
-	mutex_init(&gspca_dev->read_lock);
 	mutex_init(&gspca_dev->queue_lock);
 	init_waitqueue_head(&gspca_dev->wq);
 
@@ -2341,12 +2322,11 @@
 	PDEBUG(D_PROBE, "%s disconnect",
 		video_device_node_name(&gspca_dev->vdev));
 	mutex_lock(&gspca_dev->usb_lock);
-	gspca_dev->present = 0;
 
-	if (gspca_dev->streaming) {
-		destroy_urbs(gspca_dev);
-		wake_up_interruptible(&gspca_dev->wq);
-	}
+	gspca_dev->present = 0;
+	wake_up_interruptible(&gspca_dev->wq);
+
+	destroy_urbs(gspca_dev);
 
 #if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE)
 	gspca_input_destroy_urb(gspca_dev);
diff --git a/drivers/media/video/gspca/gspca.h b/drivers/media/video/gspca/gspca.h
index 97b77a2..4175522 100644
--- a/drivers/media/video/gspca/gspca.h
+++ b/drivers/media/video/gspca/gspca.h
@@ -205,14 +205,12 @@
 
 	wait_queue_head_t wq;		/* wait queue */
 	struct mutex usb_lock;		/* usb exchange protection */
-	struct mutex read_lock;		/* read protection */
 	struct mutex queue_lock;	/* ISOC queue protection */
 	int usb_err;			/* USB error - protected by usb_lock */
 	u16 pkt_size;			/* ISOC packet size */
 #ifdef CONFIG_PM
 	char frozen;			/* suspend - resume */
 #endif
-	char users;			/* number of opens */
 	char present;			/* device connected */
 	char nbufread;			/* number of buffers for read() */
 	char memory;			/* memory type (V4L2_MEMORY_xxx) */
diff --git a/drivers/media/video/gspca/jeilinj.c b/drivers/media/video/gspca/jeilinj.c
index a35e87b..06b777f 100644
--- a/drivers/media/video/gspca/jeilinj.c
+++ b/drivers/media/video/gspca/jeilinj.c
@@ -314,7 +314,7 @@
 }
 
 /* Table of supported USB devices */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0979, 0x0280)},
 	{}
 };
diff --git a/drivers/media/video/gspca/jpeg.h b/drivers/media/video/gspca/jpeg.h
index de63c36..ab54910 100644
--- a/drivers/media/video/gspca/jpeg.h
+++ b/drivers/media/video/gspca/jpeg.h
@@ -141,9 +141,9 @@
 	memcpy(jpeg_hdr, jpeg_head, sizeof jpeg_head);
 #ifndef CONEX_CAM
 	jpeg_hdr[JPEG_HEIGHT_OFFSET + 0] = height >> 8;
-	jpeg_hdr[JPEG_HEIGHT_OFFSET + 1] = height & 0xff;
+	jpeg_hdr[JPEG_HEIGHT_OFFSET + 1] = height;
 	jpeg_hdr[JPEG_HEIGHT_OFFSET + 2] = width >> 8;
-	jpeg_hdr[JPEG_HEIGHT_OFFSET + 3] = width & 0xff;
+	jpeg_hdr[JPEG_HEIGHT_OFFSET + 3] = width;
 	jpeg_hdr[JPEG_HEIGHT_OFFSET + 6] = samplesY;
 #endif
 }
diff --git a/drivers/media/video/gspca/konica.c b/drivers/media/video/gspca/konica.c
index d2ce65d..5964691 100644
--- a/drivers/media/video/gspca/konica.c
+++ b/drivers/media/video/gspca/konica.c
@@ -607,7 +607,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x04c8, 0x0720)}, /* Intel YC 76 */
 	{}
 };
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index c872b93..a7722b1 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -28,7 +28,7 @@
 static int dump_bridge;
 int dump_sensor;
 
-static const __devinitdata struct usb_device_id m5602_table[] = {
+static const struct usb_device_id m5602_table[] = {
 	{USB_DEVICE(0x0402, 0x5602)},
 	{}
 };
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index a81536e..cb4d0bf 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -490,7 +490,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x093a, 0x050f)},
 	{}
 };
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index 7607a28..3884c9d 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -1229,7 +1229,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x08ca, 0x0110)},	/* Trust Spyc@m 100 */
 	{USB_DEVICE(0x08ca, 0x0111)},	/* Aiptek Pencam VGA+ */
 	{USB_DEVICE(0x093a, 0x010f)},	/* All other known MR97310A VGA cams */
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index e1c3b93..8ab2c45 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -488,7 +488,6 @@
 #define R511_SNAP_PXDIV			0x1c
 #define R511_SNAP_LNDIV			0x1d
 #define R511_SNAP_UV_EN			0x1e
-#define R511_SNAP_UV_EN			0x1e
 #define R511_SNAP_OPTS			0x1f
 
 #define R511_DRAM_FLOW_CTL		0x20
@@ -1847,8 +1846,7 @@
 	{ 0x6c, 0x0a },
 	{ 0x6d, 0x55 },
 	{ 0x6e, 0x11 },
-	{ 0x6f, 0x9f },
-					/* "9e for advance AWB" */
+	{ 0x6f, 0x9f },			/* "9e for advance AWB" */
 	{ 0x6a, 0x40 },
 	{ OV7670_R01_BLUE, 0x40 },
 	{ OV7670_R02_RED, 0x60 },
@@ -3054,7 +3052,7 @@
 {
 	static const struct ov_regvals init_519[] = {
 		{ 0x5a, 0x6d }, /* EnableSystem */
-		{ 0x53, 0x9b },
+		{ 0x53, 0x9b }, /* don't enable the microcontroller */
 		{ OV519_R54_EN_CLK1, 0xff }, /* set bit2 to enable jpeg */
 		{ 0x5d, 0x03 },
 		{ 0x49, 0x01 },
@@ -4747,7 +4745,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF },
 	{USB_DEVICE(0x041e, 0x4052), .driver_info = BRIDGE_OV519 },
 	{USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 0edf939..04da228 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -479,15 +479,20 @@
 	struct usb_device *udev = gspca_dev->dev;
 	int ret;
 
-	PDEBUG(D_USBO, "reg=0x%04x, val=0%02x", reg, val);
+	if (gspca_dev->usb_err < 0)
+		return;
+
+	PDEBUG(D_USBO, "SET 01 0000 %04x %02x", reg, val);
 	gspca_dev->usb_buf[0] = val;
 	ret = usb_control_msg(udev,
 			      usb_sndctrlpipe(udev, 0),
 			      0x01,
 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 			      0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
-	if (ret < 0)
+	if (ret < 0) {
 		err("write failed %d", ret);
+		gspca_dev->usb_err = ret;
+	}
 }
 
 static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -495,14 +500,18 @@
 	struct usb_device *udev = gspca_dev->dev;
 	int ret;
 
+	if (gspca_dev->usb_err < 0)
+		return 0;
 	ret = usb_control_msg(udev,
 			      usb_rcvctrlpipe(udev, 0),
 			      0x01,
 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 			      0x00, reg, gspca_dev->usb_buf, 1, CTRL_TIMEOUT);
-	PDEBUG(D_USBI, "reg=0x%04x, data=0x%02x", reg, gspca_dev->usb_buf[0]);
-	if (ret < 0)
+	PDEBUG(D_USBI, "GET 01 0000 %04x %02x", reg, gspca_dev->usb_buf[0]);
+	if (ret < 0) {
 		err("read failed %d", ret);
+		gspca_dev->usb_err = ret;
+	}
 	return gspca_dev->usb_buf[0];
 }
 
@@ -558,13 +567,15 @@
 
 static void sccb_reg_write(struct gspca_dev *gspca_dev, u8 reg, u8 val)
 {
-	PDEBUG(D_USBO, "reg: 0x%02x, val: 0x%02x", reg, val);
+	PDEBUG(D_USBO, "sccb write: %02x %02x", reg, val);
 	ov534_reg_write(gspca_dev, OV534_REG_SUBADDR, reg);
 	ov534_reg_write(gspca_dev, OV534_REG_WRITE, val);
 	ov534_reg_write(gspca_dev, OV534_REG_OPERATION, OV534_OP_WRITE_3);
 
-	if (!sccb_check_status(gspca_dev))
+	if (!sccb_check_status(gspca_dev)) {
 		err("sccb_reg_write failed");
+		gspca_dev->usb_err = -EIO;
+	}
 }
 
 static u8 sccb_reg_read(struct gspca_dev *gspca_dev, u16 reg)
@@ -885,7 +896,7 @@
 	ov534_set_led(gspca_dev, 0);
 	set_frame_rate(gspca_dev);
 
-	return 0;
+	return gspca_dev->usb_err;
 }
 
 static int sd_start(struct gspca_dev *gspca_dev)
@@ -920,7 +931,7 @@
 
 	ov534_set_led(gspca_dev, 1);
 	ov534_reg_write(gspca_dev, 0xe0, 0x00);
-	return 0;
+	return gspca_dev->usb_err;
 }
 
 static void sd_stopN(struct gspca_dev *gspca_dev)
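
The ov534 changes above follow gspca's sticky-error convention: the first failing control transfer is latched in gspca_dev->usb_err, later register accesses return early, and sd_init()/sd_start() simply hand back the latched value. A reduced sketch of that convention, assuming a hypothetical low-level helper do_control_write() in place of the real usb_control_msg() call:

static void reg_write_checked(struct gspca_dev *gspca_dev, u16 reg, u8 val)
{
	int ret;

	if (gspca_dev->usb_err < 0)
		return;				/* an earlier transfer already failed */

	ret = do_control_write(gspca_dev, reg, val);	/* hypothetical helper */
	if (ret < 0)
		gspca_dev->usb_err = ret;	/* latch only the first error */
}
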
@@ -1289,7 +1300,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x1415, 0x2000)},
 	{}
 };
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index c5244b4..aaf5428 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -1429,7 +1429,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x06f8, 0x3003)},
 	{}
 };
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index 96f9986..81739a2 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -530,7 +530,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x041e, 0x4028)},
 	{USB_DEVICE(0x093a, 0x2460)},
 	{USB_DEVICE(0x093a, 0x2461)},
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index 2700975..5615d7b 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -1184,7 +1184,7 @@
 };
 
 /* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x06f8, 0x3009)},
 	{USB_DEVICE(0x093a, 0x2620)},
 	{USB_DEVICE(0x093a, 0x2621)},
@@ -1201,7 +1201,7 @@
 MODULE_DEVICE_TABLE(usb, device_table);
 
 /* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
 			const struct usb_device_id *id)
 {
 	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 6820f5d..f8801b5 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -837,7 +837,7 @@
 };
 
 /* -- module initialisation -- */
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x093a, 0x2600)},
 	{USB_DEVICE(0x093a, 0x2601)},
 	{USB_DEVICE(0x093a, 0x2603)},
@@ -849,7 +849,7 @@
 MODULE_DEVICE_TABLE(usb, device_table);
 
 /* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
 			const struct usb_device_id *id)
 {
 	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/sn9c2028.c b/drivers/media/video/gspca/sn9c2028.c
index 40a0668..4271f86 100644
--- a/drivers/media/video/gspca/sn9c2028.c
+++ b/drivers/media/video/gspca/sn9c2028.c
@@ -703,7 +703,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0458, 0x7005)}, /* Genius Smart 300, version 2 */
 	/* The Genius Smart is untested. I can't find an owner ! */
 	/* {USB_DEVICE(0x0c45, 0x8000)}, DC31VC, Don't know this camera */
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index cb08d00..fcf2989 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -2470,7 +2470,7 @@
 			| (SENSOR_ ## sensor << 8) \
 			| (i2c_addr)
 
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)},
 	{USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)},
 	{USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)},
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 73504a3..c6cd68d 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -23,8 +23,15 @@
 /* Some documentation on known sonixb registers:
 
 Reg	Use
+sn9c101 / sn9c102:
 0x10	high nibble red gain low nibble blue gain
 0x11	low nibble green gain
+sn9c103:
+0x05	red gain 0-127
+0x06	blue gain 0-127
+0x07	green gain 0-127
+all:
+0x08-0x0f i2c / 3wire registers
 0x12	hstart
 0x13	vstart
 0x15	hsize (hsize = register-value * 16)
@@ -88,12 +95,9 @@
 typedef const __u8 sensor_init_t[8];
 
 struct sensor_data {
-	const __u8 *bridge_init[2];
-	int bridge_init_size[2];
+	const __u8 *bridge_init;
 	sensor_init_t *sensor_init;
 	int sensor_init_size;
-	sensor_init_t *sensor_bridge_init[2];
-	int sensor_bridge_init_size[2];
 	int flags;
 	unsigned ctrl_dis;
 	__u8 sensor_addr;
@@ -114,7 +118,6 @@
 #define NO_FREQ (1 << FREQ_IDX)
 #define NO_BRIGHTNESS (1 << BRIGHTNESS_IDX)
 
-#define COMP2 0x8f
 #define COMP 0xc7		/* 0x87 //0x07 */
 #define COMP1 0xc9		/* 0x89 //0x09 */
 
@@ -123,15 +126,11 @@
 
 #define SYS_CLK 0x04
 
-#define SENS(bridge_1, bridge_3, sensor, sensor_1, \
-	sensor_3, _flags, _ctrl_dis, _sensor_addr) \
+#define SENS(bridge, sensor, _flags, _ctrl_dis, _sensor_addr) \
 { \
-	.bridge_init = { bridge_1, bridge_3 }, \
-	.bridge_init_size = { sizeof(bridge_1), sizeof(bridge_3) }, \
+	.bridge_init = bridge, \
 	.sensor_init = sensor, \
 	.sensor_init_size = sizeof(sensor), \
-	.sensor_bridge_init = { sensor_1, sensor_3,}, \
-	.sensor_bridge_init_size = { sizeof(sensor_1), sizeof(sensor_3)}, \
 	.flags = _flags, .ctrl_dis = _ctrl_dis, .sensor_addr = _sensor_addr \
 }
 
@@ -311,7 +310,6 @@
 	0x00, 0x00,
 	0x00, 0x00, 0x00, 0x02, 0x02, 0x00,
 	0x28, 0x1e, 0x60, 0x8e, 0x42,
-	0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c
 };
 static const __u8 hv7131d_sensor_init[][8] = {
 	{0xa0, 0x11, 0x01, 0x04, 0x00, 0x00, 0x00, 0x17},
@@ -326,7 +324,6 @@
 	0x00, 0x00,
 	0x00, 0x00, 0x00, 0x02, 0x01, 0x00,
 	0x28, 0x1e, 0x60, 0x8a, 0x20,
-	0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c
 };
 static const __u8 hv7131r_sensor_init[][8] = {
 	{0xc0, 0x11, 0x31, 0x38, 0x2a, 0x2e, 0x00, 0x10},
@@ -339,7 +336,7 @@
 	0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
 	0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 	0x00, 0x01, 0x01, 0x0a, 0x16, 0x12, 0x68, 0x8b,
-	0x10, 0x1d, 0x10, 0x02, 0x02, 0x09, 0x07
+	0x10,
 };
 static const __u8 ov6650_sensor_init[][8] = {
 	/* Bright, contrast, etc are set through SCBB interface.
@@ -378,24 +375,13 @@
 	0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,	/* r09 .. r10 */
 	0x00, 0x01, 0x01, 0x0a,				/* r11 .. r14 */
 	0x28, 0x1e,			/* H & V sizes     r15 .. r16 */
-	0x68, COMP2, MCK_INIT1,				/* r17 .. r19 */
-	0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c		/* r1a .. r1f */
-};
-static const __u8 initOv7630_3[] = {
-	0x44, 0x44, 0x00, 0x1a, 0x20, 0x20, 0x20, 0x80,	/* r01 .. r08 */
-	0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,	/* r09 .. r10 */
-	0x00, 0x02, 0x01, 0x0a,				/* r11 .. r14 */
-	0x28, 0x1e,			/* H & V sizes     r15 .. r16 */
 	0x68, 0x8f, MCK_INIT1,				/* r17 .. r19 */
-	0x1d, 0x10, 0x02, 0x03, 0x0f, 0x0c, 0x00,	/* r1a .. r20 */
-	0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70, 0x80, /* r21 .. r28 */
-	0x90, 0xa0, 0xb0, 0xc0, 0xd0, 0xe0, 0xf0, 0xff  /* r29 .. r30 */
 };
 static const __u8 ov7630_sensor_init[][8] = {
 	{0xa0, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10},
 	{0xb0, 0x21, 0x01, 0x77, 0x3a, 0x00, 0x00, 0x10},
 /*	{0xd0, 0x21, 0x12, 0x7c, 0x01, 0x80, 0x34, 0x10},	   jfm */
-	{0xd0, 0x21, 0x12, 0x1c, 0x00, 0x80, 0x34, 0x10},	/* jfm */
+	{0xd0, 0x21, 0x12, 0x5c, 0x00, 0x80, 0x34, 0x10},	/* jfm */
 	{0xa0, 0x21, 0x1b, 0x04, 0x00, 0x80, 0x34, 0x10},
 	{0xa0, 0x21, 0x20, 0x44, 0x00, 0x80, 0x34, 0x10},
 	{0xa0, 0x21, 0x23, 0xee, 0x00, 0x80, 0x34, 0x10},
@@ -413,16 +399,11 @@
 	{0xd0, 0x21, 0x17, 0x1c, 0xbd, 0x06, 0xf6, 0x10},
 };
 
-static const __u8 ov7630_sensor_init_3[][8] = {
-	{0xa0, 0x21, 0x13, 0x80, 0x00,	0x00, 0x00, 0x10},
-};
-
 static const __u8 initPas106[] = {
 	0x04, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x40, 0x00, 0x00, 0x00,
 	0x00, 0x00,
 	0x00, 0x00, 0x00, 0x04, 0x01, 0x00,
 	0x16, 0x12, 0x24, COMP1, MCK_INIT1,
-	0x18, 0x10, 0x02, 0x02, 0x09, 0x07
 };
 /* compression 0x86 mckinit1 0x2b */
 
@@ -496,7 +477,6 @@
 	0x00, 0x00,
 	0x00, 0x00, 0x00, 0x06, 0x03, 0x0a,
 	0x28, 0x1e, 0x20, 0x89, 0x20,
-	0x00, 0x00, 0x02, 0x03, 0x0f, 0x0c
 };
 
 /* "Known" PAS202BCB registers:
@@ -537,7 +517,6 @@
 	0x00, 0x00,
 	0x00, 0x00, 0x00, 0x45, 0x09, 0x0a,
 	0x16, 0x12, 0x60, 0x86, 0x2b,
-	0x14, 0x0a, 0x02, 0x02, 0x09, 0x07
 };
 /* Same as above, except a different hstart */
 static const __u8 initTas5110d[] = {
@@ -545,12 +524,19 @@
 	0x00, 0x00,
 	0x00, 0x00, 0x00, 0x41, 0x09, 0x0a,
 	0x16, 0x12, 0x60, 0x86, 0x2b,
-	0x14, 0x0a, 0x02, 0x02, 0x09, 0x07
 };
-static const __u8 tas5110_sensor_init[][8] = {
+/* tas5110c is 3 wire, tas5110d is 2 wire (regular i2c) */
+static const __u8 tas5110c_sensor_init[][8] = {
 	{0x30, 0x11, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x10},
 	{0x30, 0x11, 0x02, 0x20, 0xa9, 0x00, 0x00, 0x10},
-	{0xa0, 0x61, 0x9a, 0xca, 0x00, 0x00, 0x00, 0x17},
+};
+/* Known TAS5110D registers
+ * reg02: gain, bit order reversed!! 0 == max gain, 255 == min gain
+ * reg03: bit3: vflip, bit4: ~hflip, bit7: ~gainboost (~ == inverted)
+ *        Note: writing reg03 seems to only work when written together with 02
+ */
+static const __u8 tas5110d_sensor_init[][8] = {
+	{0xa0, 0x61, 0x9a, 0xca, 0x00, 0x00, 0x00, 0x17}, /* reset */
 };
 
 static const __u8 initTas5130[] = {
@@ -558,7 +544,6 @@
 	0x00, 0x00,
 	0x00, 0x00, 0x00, 0x68, 0x0c, 0x0a,
 	0x28, 0x1e, 0x60, COMP, MCK_INIT,
-	0x18, 0x10, 0x04, 0x03, 0x11, 0x0c
 };
 static const __u8 tas5130_sensor_init[][8] = {
 /*	{0x30, 0x11, 0x00, 0x40, 0x47, 0x00, 0x00, 0x10},
@@ -569,21 +554,18 @@
 };
 
 static struct sensor_data sensor_data[] = {
-SENS(initHv7131d, NULL, hv7131d_sensor_init, NULL, NULL, F_GAIN, NO_BRIGHTNESS|NO_FREQ, 0),
-SENS(initHv7131r, NULL, hv7131r_sensor_init, NULL, NULL, 0, NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0),
-SENS(initOv6650, NULL, ov6650_sensor_init, NULL, NULL, F_GAIN|F_SIF, 0, 0x60),
-SENS(initOv7630, initOv7630_3, ov7630_sensor_init, NULL, ov7630_sensor_init_3,
-	F_GAIN, 0, 0x21),
-SENS(initPas106, NULL, pas106_sensor_init, NULL, NULL, F_GAIN|F_SIF, NO_FREQ,
-	0),
-SENS(initPas202, initPas202, pas202_sensor_init, NULL, NULL, F_GAIN,
-	NO_FREQ, 0),
-SENS(initTas5110c, NULL, tas5110_sensor_init, NULL, NULL,
-	F_GAIN|F_SIF|F_COARSE_EXPO, NO_BRIGHTNESS|NO_FREQ, 0),
-SENS(initTas5110d, NULL, tas5110_sensor_init, NULL, NULL,
-	F_GAIN|F_SIF|F_COARSE_EXPO, NO_BRIGHTNESS|NO_FREQ, 0),
-SENS(initTas5130, NULL, tas5130_sensor_init, NULL, NULL, 0, NO_EXPO|NO_FREQ,
-	0),
+SENS(initHv7131d, hv7131d_sensor_init, F_GAIN, NO_BRIGHTNESS|NO_FREQ, 0),
+SENS(initHv7131r, hv7131r_sensor_init, 0, NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0),
+SENS(initOv6650, ov6650_sensor_init, F_GAIN|F_SIF, 0, 0x60),
+SENS(initOv7630, ov7630_sensor_init, F_GAIN, 0, 0x21),
+SENS(initPas106, pas106_sensor_init, F_GAIN|F_SIF, NO_FREQ, 0),
+SENS(initPas202, pas202_sensor_init, F_GAIN, NO_FREQ, 0),
+SENS(initTas5110c, tas5110c_sensor_init, F_GAIN|F_SIF|F_COARSE_EXPO,
+	NO_BRIGHTNESS|NO_FREQ, 0),
+SENS(initTas5110d, tas5110d_sensor_init, F_GAIN|F_SIF|F_COARSE_EXPO,
+	NO_BRIGHTNESS|NO_FREQ, 0),
+SENS(initTas5130, tas5130_sensor_init, F_GAIN,
+	NO_BRIGHTNESS|NO_EXPO|NO_FREQ, 0),
 };
 
 /* get one byte in gspca_dev->usb_buf */
@@ -655,7 +637,6 @@
 static void setbrightness(struct gspca_dev *gspca_dev)
 {
 	struct sd *sd = (struct sd *) gspca_dev;
-	__u8 value;
 
 	switch (sd->sensor) {
 	case  SENSOR_OV6650:
@@ -697,17 +678,6 @@
 			goto err;
 		break;
 	    }
-	case SENSOR_TAS5130CXX: {
-		__u8 i2c[] =
-			{0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10};
-
-		value = 0xff - sd->brightness;
-		i2c[4] = value;
-		PDEBUG(D_CONF, "brightness %d : %d", value, i2c[4]);
-		if (i2c_w(gspca_dev, i2c) < 0)
-			goto err;
-		break;
-	    }
 	}
 	return;
 err:
@@ -733,7 +703,7 @@
 		break;
 	    }
 	case SENSOR_TAS5110C:
-	case SENSOR_TAS5110D: {
+	case SENSOR_TAS5130CXX: {
 		__u8 i2c[] =
 			{0x30, 0x11, 0x02, 0x20, 0x70, 0x00, 0x00, 0x10};
 
@@ -742,6 +712,23 @@
 			goto err;
 		break;
 	    }
+	case SENSOR_TAS5110D: {
+		__u8 i2c[] = {
+			0xb0, 0x61, 0x02, 0x00, 0x10, 0x00, 0x00, 0x17 };
+		gain = 255 - gain;
+		/* The bits in the register are the wrong way around!! */
+		i2c[3] |= (gain & 0x80) >> 7;
+		i2c[3] |= (gain & 0x40) >> 5;
+		i2c[3] |= (gain & 0x20) >> 3;
+		i2c[3] |= (gain & 0x10) >> 1;
+		i2c[3] |= (gain & 0x08) << 1;
+		i2c[3] |= (gain & 0x04) << 3;
+		i2c[3] |= (gain & 0x02) << 5;
+		i2c[3] |= (gain & 0x01) << 7;
+		if (i2c_w(gspca_dev, i2c) < 0)
+			goto err;
+		break;
+	    }
 
 	case SENSOR_OV6650:
 		gain >>= 1;
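
In the TAS5110D branch above the gain value is first inverted (the register treats 0 as maximum gain) and then written with its bit order mirrored. The unrolled shift/mask sequence is equivalent to a plain 8-bit bit reversal; a standalone sketch, with a hypothetical helper name:

#include <stdint.h>

/* Mirror the bit order of an 8-bit value (bit 0 <-> bit 7 and so on),
 * equivalent to the eight OR-and-shift lines in the TAS5110D case. */
static uint8_t reverse8(uint8_t v)
{
	uint8_t r = 0;
	int i;

	for (i = 0; i < 8; i++) {
		r = (uint8_t)((r << 1) | (v & 1));
		v >>= 1;
	}
	return r;
}
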
@@ -796,7 +783,7 @@
 {
 	struct sd *sd = (struct sd *) gspca_dev;
 	__u8 gain;
-	__u8 buf[2] = { 0, 0 };
+	__u8 buf[3] = { 0, 0, 0 };
 
 	if (sensor_data[sd->sensor].flags & F_GAIN) {
 		/* Use the sensor gain to do the actual gain */
@@ -804,13 +791,18 @@
 		return;
 	}
 
-	gain = sd->gain >> 4;
-
-	/* red and blue gain */
-	buf[0] = gain << 4 | gain;
-	/* green gain */
-	buf[1] = gain;
-	reg_w(gspca_dev, 0x10, buf, 2);
+	if (sd->bridge == BRIDGE_103) {
+		gain = sd->gain >> 1;
+		buf[0] = gain; /* Red */
+		buf[1] = gain; /* Green */
+		buf[2] = gain; /* Blue */
+		reg_w(gspca_dev, 0x05, buf, 3);
+	} else {
+		gain = sd->gain >> 4;
+		buf[0] = gain << 4 | gain; /* Red and blue */
+		buf[1] = gain; /* Green */
+		reg_w(gspca_dev, 0x10, buf, 2);
+	}
 }
 
 static void setexposure(struct gspca_dev *gspca_dev)
@@ -1049,7 +1041,7 @@
 		desired_avg_lum = 5000;
 	} else {
 		deadzone = 1500;
-		desired_avg_lum = 18000;
+		desired_avg_lum = 13000;
 	}
 
 	if (sensor_data[sd->sensor].flags & F_COARSE_EXPO)
@@ -1127,53 +1119,91 @@
 {
 	struct sd *sd = (struct sd *) gspca_dev;
 	struct cam *cam = &gspca_dev->cam;
-	int mode, l;
-	const __u8 *sn9c10x;
-	__u8 reg12_19[8];
+	int i, mode;
+	__u8 regs[0x31];
 
 	mode = cam->cam_mode[gspca_dev->curr_mode].priv & 0x07;
-	sn9c10x = sensor_data[sd->sensor].bridge_init[sd->bridge];
-	l = sensor_data[sd->sensor].bridge_init_size[sd->bridge];
-	memcpy(reg12_19, &sn9c10x[0x12 - 1], 8);
-	reg12_19[6] = sn9c10x[0x18 - 1] | (mode << 4);
-	/* Special cases where reg 17 and or 19 value depends on mode */
+	/* Copy registers 0x01 - 0x19 from the template */
+	memcpy(&regs[0x01], sensor_data[sd->sensor].bridge_init, 0x19);
+	/* Set the mode */
+	regs[0x18] |= mode << 4;
+
+	/* Set bridge gain to 1.0 */
+	if (sd->bridge == BRIDGE_103) {
+		regs[0x05] = 0x20; /* Red */
+		regs[0x06] = 0x20; /* Green */
+		regs[0x07] = 0x20; /* Blue */
+	} else {
+		regs[0x10] = 0x00; /* Red and blue */
+		regs[0x11] = 0x00; /* Green */
+	}
+
+	/* Setup pixel numbers and auto exposure window */
+	if (sensor_data[sd->sensor].flags & F_SIF) {
+		regs[0x1a] = 0x14; /* HO_SIZE 640, makes no sense */
+		regs[0x1b] = 0x0a; /* VO_SIZE 320, makes no sense */
+		regs[0x1c] = 0x02; /* AE H-start 64 */
+		regs[0x1d] = 0x02; /* AE V-start 64 */
+		regs[0x1e] = 0x09; /* AE H-end 288 */
+		regs[0x1f] = 0x07; /* AE V-end 224 */
+	} else {
+		regs[0x1a] = 0x1d; /* HO_SIZE 960, makes no sense */
+		regs[0x1b] = 0x10; /* VO_SIZE 512, makes no sense */
+		regs[0x1c] = 0x05; /* AE H-start 160 */
+		regs[0x1d] = 0x03; /* AE V-start 96 */
+		regs[0x1e] = 0x0f; /* AE H-end 480 */
+		regs[0x1f] = 0x0c; /* AE V-end 384 */
+	}
+
+	/* Setup the gamma table (only used with the sn9c103 bridge) */
+	for (i = 0; i < 16; i++)
+		regs[0x20 + i] = i * 16;
+	regs[0x20 + i] = 255;
+
+	/* Special cases where some regs depend on mode or bridge */
 	switch (sd->sensor) {
 	case SENSOR_TAS5130CXX:
-		/* probably not mode specific at all most likely the upper
+		/* FIXME / TESTME
+		   probably not mode specific at all; most likely the upper
 		   nibble of 0x19 is exposure (clock divider) just as with
 		   the tas5110, we need someone to test this. */
-		reg12_19[7] = mode ? 0x23 : 0x43;
+		regs[0x19] = mode ? 0x23 : 0x43;
 		break;
+	case SENSOR_OV7630:
+		/* FIXME / TESTME for some reason with the 101/102 bridge the
+		   clock is set to 12 MHz (reg1 == 0x04), rather than 24.
+		   Also the hstart needs to go from 1 to 2 when using a 103,
+		   which is likely related. This does not seem right. */
+		if (sd->bridge == BRIDGE_103) {
+			regs[0x01] = 0x44; /* Select 24 MHz clock */
+			regs[0x12] = 0x02; /* Set hstart to 2 */
+		}
 	}
 	/* Disable compression when the raw bayer format has been selected */
 	if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW)
-		reg12_19[6] &= ~0x80;
+		regs[0x18] &= ~0x80;
 
 	/* Vga mode emulation on SIF sensor? */
 	if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_REDUCED_SIF) {
-		reg12_19[0] += 16; /* 0x12: hstart adjust */
-		reg12_19[1] += 24; /* 0x13: vstart adjust */
-		reg12_19[3] = 320 / 16; /* 0x15: hsize */
-		reg12_19[4] = 240 / 16; /* 0x16: vsize */
+		regs[0x12] += 16;	/* hstart adjust */
+		regs[0x13] += 24;	/* vstart adjust */
+		regs[0x15]  = 320 / 16; /* hsize */
+		regs[0x16]  = 240 / 16; /* vsize */
 	}
 
 	/* reg 0x01 bit 2 video transfer on */
-	reg_w(gspca_dev, 0x01, &sn9c10x[0x01 - 1], 1);
+	reg_w(gspca_dev, 0x01, &regs[0x01], 1);
 	/* reg 0x17 SensorClk enable inv Clk 0x60 */
-	reg_w(gspca_dev, 0x17, &sn9c10x[0x17 - 1], 1);
+	reg_w(gspca_dev, 0x17, &regs[0x17], 1);
 	/* Set the registers from the template */
-	reg_w(gspca_dev, 0x01, sn9c10x, l);
+	reg_w(gspca_dev, 0x01, &regs[0x01],
+	      (sd->bridge == BRIDGE_103) ? 0x30 : 0x1f);
 
 	/* Init the sensor */
 	i2c_w_vector(gspca_dev, sensor_data[sd->sensor].sensor_init,
 			sensor_data[sd->sensor].sensor_init_size);
-	if (sensor_data[sd->sensor].sensor_bridge_init[sd->bridge])
-		i2c_w_vector(gspca_dev,
-			sensor_data[sd->sensor].sensor_bridge_init[sd->bridge],
-			sensor_data[sd->sensor].sensor_bridge_init_size[
-				sd->bridge]);
 
-	/* Mode specific sensor setup */
+	/* Mode / bridge specific sensor setup */
 	switch (sd->sensor) {
 	case SENSOR_PAS202: {
 		const __u8 i2cpclockdiv[] =
@@ -1181,27 +1211,37 @@
 		/* clockdiv from 4 to 3 (7.5 -> 10 fps) when in low res mode */
 		if (mode)
 			i2c_w(gspca_dev, i2cpclockdiv);
+		break;
 	    }
+	case SENSOR_OV7630:
+		/* FIXME / TESTME We should be able to handle this identically
+		   for the 101/102 and the 103 case */
+		if (sd->bridge == BRIDGE_103) {
+			const __u8 i2c[] = { 0xa0, 0x21, 0x13,
+					     0x80, 0x00, 0x00, 0x00, 0x10 };
+			i2c_w(gspca_dev, i2c);
+		}
+		break;
 	}
 	/* H_size V_size 0x28, 0x1e -> 640x480. 0x16, 0x12 -> 352x288 */
-	reg_w(gspca_dev, 0x15, &reg12_19[3], 2);
+	reg_w(gspca_dev, 0x15, &regs[0x15], 2);
 	/* compression register */
-	reg_w(gspca_dev, 0x18, &reg12_19[6], 1);
+	reg_w(gspca_dev, 0x18, &regs[0x18], 1);
 	/* H_start */
-	reg_w(gspca_dev, 0x12, &reg12_19[0], 1);
+	reg_w(gspca_dev, 0x12, &regs[0x12], 1);
 	/* V_START */
-	reg_w(gspca_dev, 0x13, &reg12_19[1], 1);
+	reg_w(gspca_dev, 0x13, &regs[0x13], 1);
 	/* reset 0x17 SensorClk enable inv Clk 0x60 */
 				/*fixme: ov7630 [17]=68 8f (+20 if 102)*/
-	reg_w(gspca_dev, 0x17, &reg12_19[5], 1);
+	reg_w(gspca_dev, 0x17, &regs[0x17], 1);
 	/*MCKSIZE ->3 */	/*fixme: not ov7630*/
-	reg_w(gspca_dev, 0x19, &reg12_19[7], 1);
+	reg_w(gspca_dev, 0x19, &regs[0x19], 1);
 	/* AE_STRX AE_STRY AE_ENDX AE_ENDY */
-	reg_w(gspca_dev, 0x1c, &sn9c10x[0x1c - 1], 4);
+	reg_w(gspca_dev, 0x1c, &regs[0x1c], 4);
 	/* Enable video transfer */
-	reg_w(gspca_dev, 0x01, &sn9c10x[0], 1);
+	reg_w(gspca_dev, 0x01, &regs[0x01], 1);
 	/* Compression */
-	reg_w(gspca_dev, 0x18, &reg12_19[6], 2);
+	reg_w(gspca_dev, 0x18, &regs[0x18], 2);
 	msleep(20);
 
 	sd->reg11 = -1;
@@ -1525,15 +1565,15 @@
 	.driver_info = (SENSOR_ ## sensor << 8) | BRIDGE_ ## bridge
 
 
-static const struct usb_device_id device_table[] __devinitconst = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0c45, 0x6001), SB(TAS5110C, 102)}, /* TAS5110C1B */
 	{USB_DEVICE(0x0c45, 0x6005), SB(TAS5110C, 101)}, /* TAS5110C1B */
 	{USB_DEVICE(0x0c45, 0x6007), SB(TAS5110D, 101)}, /* TAS5110D */
 	{USB_DEVICE(0x0c45, 0x6009), SB(PAS106, 101)},
 	{USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)},
 	{USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
 	{USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)},
+#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
 	{USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)},
 	{USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)},
 #endif
@@ -1544,18 +1584,22 @@
 	{USB_DEVICE(0x0c45, 0x602c), SB(OV7630, 102)},
 	{USB_DEVICE(0x0c45, 0x602d), SB(HV7131R, 102)},
 	{USB_DEVICE(0x0c45, 0x602e), SB(OV7630, 102)},
-	/* {USB_DEVICE(0x0c45, 0x602b), SB(MI03XX, 102)}, */ /* MI0343 MI0360 MI0330 */
+	/* {USB_DEVICE(0x0c45, 0x6030), SB(MI03XX, 102)}, */ /* MI0343 MI0360 MI0330 */
+	/* {USB_DEVICE(0x0c45, 0x6082), SB(MI03XX, 103)}, */ /* MI0343 MI0360 */
+	{USB_DEVICE(0x0c45, 0x6083), SB(HV7131D, 103)},
+	{USB_DEVICE(0x0c45, 0x608c), SB(HV7131R, 103)},
+	/* {USB_DEVICE(0x0c45, 0x608e), SB(CISVF10, 103)}, */
 	{USB_DEVICE(0x0c45, 0x608f), SB(OV7630, 103)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
+	{USB_DEVICE(0x0c45, 0x60a8), SB(PAS106, 103)},
+	{USB_DEVICE(0x0c45, 0x60aa), SB(TAS5130CXX, 103)},
 	{USB_DEVICE(0x0c45, 0x60af), SB(PAS202, 103)},
-#endif
 	{USB_DEVICE(0x0c45, 0x60b0), SB(OV7630, 103)},
 	{}
 };
 MODULE_DEVICE_TABLE(usb, device_table);
 
 /* -- device connect -- */
-static int __devinit sd_probe(struct usb_interface *intf,
+static int sd_probe(struct usb_interface *intf,
 			const struct usb_device_id *id)
 {
 	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 2d0bb17..d6f39ce 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -25,12 +25,12 @@
 #include "gspca.h"
 #include "jpeg.h"
 
-#define V4L2_CID_INFRARED (V4L2_CID_PRIVATE_BASE + 0)
-
 MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>");
 MODULE_DESCRIPTION("GSPCA/SONIX JPEG USB Camera Driver");
 MODULE_LICENSE("GPL");
 
+static int starcam;
+
 /* controls */
 enum e_ctrl {
 	BRIGHTNESS,
@@ -43,7 +43,7 @@
 	HFLIP,
 	VFLIP,
 	SHARPNESS,
-	INFRARED,
+	ILLUM,
 	FREQ,
 	NCTRLS		/* number of controls */
 };
@@ -100,7 +100,8 @@
 };
 
 /* device flags */
-#define PDN_INV	1		/* inverse pin S_PWR_DN / sn_xxx tables */
+#define F_PDN_INV	0x01	/* inverse pin S_PWR_DN / sn_xxx tables */
+#define F_ILLUM		0x02	/* presence of illuminator */
 
 /* sn9c1xx definitions */
 /* register 0x01 */
@@ -124,7 +125,7 @@
 static void setautogain(struct gspca_dev *gspca_dev);
 static void sethvflip(struct gspca_dev *gspca_dev);
 static void setsharpness(struct gspca_dev *gspca_dev);
-static void setinfrared(struct gspca_dev *gspca_dev);
+static void setillum(struct gspca_dev *gspca_dev);
 static void setfreq(struct gspca_dev *gspca_dev);
 
 static const struct ctrl sd_ctrls[NCTRLS] = {
@@ -251,18 +252,17 @@
 	    },
 	    .set_control = setsharpness
 	},
-/* mt9v111 only */
-[INFRARED] = {
+[ILLUM] = {
 	    {
-		.id      = V4L2_CID_INFRARED,
+		.id      = V4L2_CID_ILLUMINATORS_1,
 		.type    = V4L2_CTRL_TYPE_BOOLEAN,
-		.name    = "Infrared",
+		.name    = "Illuminator / infrared",
 		.minimum = 0,
 		.maximum = 1,
 		.step    = 1,
 		.default_value = 0,
 	    },
-	    .set_control = setinfrared
+	    .set_control = setillum
 	},
 /* ov7630/ov7648/ov7660 only */
 [FREQ] = {
@@ -282,32 +282,26 @@
 /* table of the disabled controls */
 static const __u32 ctrl_dis[] = {
 [SENSOR_ADCM1700] =	(1 << AUTOGAIN) |
-			(1 << INFRARED) |
 			(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
 
-[SENSOR_GC0307] =	(1 << INFRARED) |
-			(1 << HFLIP) |
+[SENSOR_GC0307] =	(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
 
-[SENSOR_HV7131R] =	(1 << INFRARED) |
-			(1 << HFLIP) |
+[SENSOR_HV7131R] =	(1 << HFLIP) |
 			(1 << FREQ),
 
-[SENSOR_MI0360] =	(1 << INFRARED) |
-			(1 << HFLIP) |
+[SENSOR_MI0360] =	(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
 
-[SENSOR_MI0360B] =	(1 << INFRARED) |
-			(1 << HFLIP) |
+[SENSOR_MI0360B] =	(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
 
-[SENSOR_MO4000] =	(1 << INFRARED) |
-			(1 << HFLIP) |
+[SENSOR_MO4000] =	(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
 
@@ -315,40 +309,32 @@
 			(1 << VFLIP) |
 			(1 << FREQ),
 
-[SENSOR_OM6802] =	(1 << INFRARED) |
-			(1 << HFLIP) |
+[SENSOR_OM6802] =	(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
 
-[SENSOR_OV7630] =	(1 << INFRARED) |
-			(1 << HFLIP),
+[SENSOR_OV7630] =	(1 << HFLIP),
 
-[SENSOR_OV7648] =	(1 << INFRARED) |
-			(1 << HFLIP),
+[SENSOR_OV7648] =	(1 << HFLIP),
 
 [SENSOR_OV7660] =	(1 << AUTOGAIN) |
-			(1 << INFRARED) |
 			(1 << HFLIP) |
 			(1 << VFLIP),
 
 [SENSOR_PO1030] =	(1 << AUTOGAIN) |
-			(1 << INFRARED) |
 			(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
 
 [SENSOR_PO2030N] =	(1 << AUTOGAIN) |
-			(1 << INFRARED) |
 			(1 << FREQ),
 
 [SENSOR_SOI768] =	(1 << AUTOGAIN) |
-			(1 << INFRARED) |
 			(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
 
 [SENSOR_SP80708] =	(1 << AUTOGAIN) |
-			(1 << INFRARED) |
 			(1 << HFLIP) |
 			(1 << VFLIP) |
 			(1 << FREQ),
@@ -1822,44 +1808,46 @@
 	PDEBUG(D_PROBE, "Sonix chip id: %02x", regF1);
 	switch (sd->bridge) {
 	case BRIDGE_SN9C102P:
-		if (regF1 != 0x11)
-			return -ENODEV;
-		reg_w1(gspca_dev, 0x02, regGpio[1]);
-		break;
 	case BRIDGE_SN9C105:
 		if (regF1 != 0x11)
 			return -ENODEV;
-		if (sd->sensor == SENSOR_MI0360)
-			mi0360_probe(gspca_dev);
-		reg_w(gspca_dev, 0x01, regGpio, 2);
-		break;
-	case BRIDGE_SN9C120:
-		if (regF1 != 0x12)
-			return -ENODEV;
-		switch (sd->sensor) {
-		case SENSOR_MI0360:
-			mi0360_probe(gspca_dev);
-			break;
-		case SENSOR_OV7630:
-			ov7630_probe(gspca_dev);
-			break;
-		case SENSOR_OV7648:
-			ov7648_probe(gspca_dev);
-			break;
-		case SENSOR_PO2030N:
-			po2030n_probe(gspca_dev);
-			break;
-		}
-		regGpio[1] = 0x70;		/* no audio */
-		reg_w(gspca_dev, 0x01, regGpio, 2);
 		break;
 	default:
 /*	case BRIDGE_SN9C110: */
-/*	case BRIDGE_SN9C325: */
+/*	case BRIDGE_SN9C120: */
 		if (regF1 != 0x12)
 			return -ENODEV;
+	}
+
+	switch (sd->sensor) {
+	case SENSOR_MI0360:
+		mi0360_probe(gspca_dev);
+		break;
+	case SENSOR_OV7630:
+		ov7630_probe(gspca_dev);
+		break;
+	case SENSOR_OV7648:
+		ov7648_probe(gspca_dev);
+		break;
+	case SENSOR_PO2030N:
+		po2030n_probe(gspca_dev);
+		break;
+	}
+
+	switch (sd->bridge) {
+	case BRIDGE_SN9C102P:
+		reg_w1(gspca_dev, 0x02, regGpio[1]);
+		break;
+	case BRIDGE_SN9C105:
+		reg_w(gspca_dev, 0x01, regGpio, 2);
+		break;
+	case BRIDGE_SN9C110:
 		reg_w1(gspca_dev, 0x02, 0x62);
 		break;
+	case BRIDGE_SN9C120:
+		regGpio[1] = 0x70;		/* no audio */
+		reg_w(gspca_dev, 0x01, regGpio, 2);
+		break;
 	}
 
 	if (sd->sensor == SENSOR_OM6802)
@@ -1874,6 +1862,8 @@
 	sd->i2c_addr = sn9c1xx[9];
 
 	gspca_dev->ctrl_dis = ctrl_dis[sd->sensor];
+	if (!(sd->flags & F_ILLUM))
+		gspca_dev->ctrl_dis |= (1 << ILLUM);
 
 	return gspca_dev->usb_err;
 }
@@ -2197,16 +2187,28 @@
 	reg_w1(gspca_dev, 0x99, sd->ctrls[SHARPNESS].val);
 }
 
-static void setinfrared(struct gspca_dev *gspca_dev)
+static void setillum(struct gspca_dev *gspca_dev)
 {
 	struct sd *sd = (struct sd *) gspca_dev;
 
-	if (gspca_dev->ctrl_dis & (1 << INFRARED))
+	if (gspca_dev->ctrl_dis & (1 << ILLUM))
 		return;
-/*fixme: different sequence for StarCam Clip and StarCam 370i */
-/* Clip */
-	i2c_w1(gspca_dev, 0x02,				/* gpio */
-		sd->ctrls[INFRARED].val ? 0x66 : 0x64);
+	switch (sd->sensor) {
+	case SENSOR_ADCM1700:
+		reg_w1(gspca_dev, 0x02,				/* gpio */
+			sd->ctrls[ILLUM].val ? 0x64 : 0x60);
+		break;
+	case SENSOR_MT9V111:
+		if (starcam)
+			reg_w1(gspca_dev, 0x02,
+				sd->ctrls[ILLUM].val ?
+						0x55 : 0x54);	/* 370i */
+		else
+			reg_w1(gspca_dev, 0x02,
+				sd->ctrls[ILLUM].val ?
+						0x66 : 0x64);	/* Clip */
+		break;
+	}
 }
 
 static void setfreq(struct gspca_dev *gspca_dev)
@@ -2344,7 +2346,7 @@
 	/* sensor clock already enabled in sd_init */
 	/* reg_w1(gspca_dev, 0xf1, 0x00); */
 	reg01 = sn9c1xx[1];
-	if (sd->flags & PDN_INV)
+	if (sd->flags & F_PDN_INV)
 		reg01 ^= S_PDN_INV;		/* power down inverted */
 	reg_w1(gspca_dev, 0x01, reg01);
 
@@ -2907,13 +2909,11 @@
 	.driver_info = (BRIDGE_ ## bridge << 16) \
 			| (SENSOR_ ## sensor << 8) \
 			| (flags)
-static const __devinitdata struct usb_device_id device_table[] = {
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)},
 	{USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)},
-#endif
-	{USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, PDN_INV)},
-	{USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, PDN_INV)},
+	{USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, F_PDN_INV)},
+	{USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, F_PDN_INV)},
 	{USB_DEVICE(0x0471, 0x0327), BS(SN9C105, MI0360)},
 	{USB_DEVICE(0x0471, 0x0328), BS(SN9C105, MI0360)},
 	{USB_DEVICE(0x0471, 0x0330), BS(SN9C105, MI0360)},
@@ -2925,7 +2925,7 @@
 /*	{USB_DEVICE(0x0c45, 0x607b), BS(SN9C102P, OV7660)}, */
 	{USB_DEVICE(0x0c45, 0x607c), BS(SN9C102P, HV7131R)},
 /*	{USB_DEVICE(0x0c45, 0x607e), BS(SN9C102P, OV7630)}, */
-	{USB_DEVICE(0x0c45, 0x60c0), BS(SN9C105, MI0360)},
+	{USB_DEVICE(0x0c45, 0x60c0), BSF(SN9C105, MI0360, F_ILLUM)},
 						/* or MT9V111 */
 /*	{USB_DEVICE(0x0c45, 0x60c2), BS(SN9C105, P1030xC)}, */
 /*	{USB_DEVICE(0x0c45, 0x60c8), BS(SN9C105, OM6802)}, */
@@ -2936,10 +2936,8 @@
 /*	{USB_DEVICE(0x0c45, 0x60fa), BS(SN9C105, OV7648)}, */
 /*	{USB_DEVICE(0x0c45, 0x60f2), BS(SN9C105, OV7660)}, */
 	{USB_DEVICE(0x0c45, 0x60fb), BS(SN9C105, OV7660)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
 	{USB_DEVICE(0x0c45, 0x60fc), BS(SN9C105, HV7131R)},
 	{USB_DEVICE(0x0c45, 0x60fe), BS(SN9C105, OV7630)},
-#endif
 	{USB_DEVICE(0x0c45, 0x6100), BS(SN9C120, MI0360)},	/*sn9c128*/
 	{USB_DEVICE(0x0c45, 0x6102), BS(SN9C120, PO2030N)},	/* /GC0305*/
 /*	{USB_DEVICE(0x0c45, 0x6108), BS(SN9C120, OM6802)}, */
@@ -2962,16 +2960,15 @@
 /*	{USB_DEVICE(0x0c45, 0x6132), BS(SN9C120, OV7670)}, */
 	{USB_DEVICE(0x0c45, 0x6138), BS(SN9C120, MO4000)},
 	{USB_DEVICE(0x0c45, 0x613a), BS(SN9C120, OV7648)},
-#if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE
 	{USB_DEVICE(0x0c45, 0x613b), BS(SN9C120, OV7660)},
-#endif
 	{USB_DEVICE(0x0c45, 0x613c), BS(SN9C120, HV7131R)},
 	{USB_DEVICE(0x0c45, 0x613e), BS(SN9C120, OV7630)},
 	{USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)},	/*sn9c120b*/
 						/* or GC0305 / GC0307 */
 	{USB_DEVICE(0x0c45, 0x6143), BS(SN9C120, SP80708)},	/*sn9c120b*/
 	{USB_DEVICE(0x0c45, 0x6148), BS(SN9C120, OM6802)},	/*sn9c120b*/
-	{USB_DEVICE(0x0c45, 0x614a), BS(SN9C120, ADCM1700)},	/*sn9c120b*/
+	{USB_DEVICE(0x0c45, 0x614a), BSF(SN9C120, ADCM1700, F_ILLUM)},
+/*	{USB_DEVICE(0x0c45, 0x614c), BS(SN9C120, GC0306)}, */	/*sn9c120b*/
 	{}
 };
 MODULE_DEVICE_TABLE(usb, device_table);
@@ -3007,3 +3004,7 @@
 
 module_init(sd_mod_init);
 module_exit(sd_mod_exit);
+
+module_param(starcam, int, 0644);
+MODULE_PARM_DESC(starcam,
+	"StarCam model. 0: Clip, 1: 370i");
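
The new starcam parameter only matters for the MT9V111-based StarCam models: with the default of 0, setillum() uses the Clip GPIO sequence (0x64/0x66), while loading the module with starcam=1 selects the 370i sequence (0x54/0x55). This replaces the old fixme about the two models sharing one hard-coded sequence.
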
diff --git a/drivers/media/video/gspca/spca1528.c b/drivers/media/video/gspca/spca1528.c
index e643386..76c006b 100644
--- a/drivers/media/video/gspca/spca1528.c
+++ b/drivers/media/video/gspca/spca1528.c
@@ -555,7 +555,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x04fc, 0x1528)},
 	{}
 };
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index 8e202b9..45552c3 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -1051,7 +1051,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x040a, 0x0300), .driver_info = KodakEZ200},
 	{USB_DEVICE(0x041e, 0x400a), .driver_info = CreativePCCam300},
 	{USB_DEVICE(0x046d, 0x0890), .driver_info = LogitechTraveler},
diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c
index 642839a..f7ef282 100644
--- a/drivers/media/video/gspca/spca501.c
+++ b/drivers/media/video/gspca/spca501.c
@@ -2155,7 +2155,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x040a, 0x0002), .driver_info = KodakDVC325},
 	{USB_DEVICE(0x0497, 0xc001), .driver_info = SmileIntlCamera},
 	{USB_DEVICE(0x0506, 0x00df), .driver_info = ThreeComHomeConnectLite},
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index bc9dd90..e5bf865 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -786,7 +786,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x041e, 0x401d), .driver_info = Nxultra},
 	{USB_DEVICE(0x0733, 0x0430), .driver_info = IntelPCCameraPro},
 /*fixme: may be UsbGrabberPV321 BRIDGE_SPCA506 SENSOR_SAA7113 */
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index 7307638..3483193 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -1509,7 +1509,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0130, 0x0130), .driver_info = HamaUSBSightcam},
 	{USB_DEVICE(0x041e, 0x4018), .driver_info = CreativeVista},
 	{USB_DEVICE(0x0733, 0x0110), .driver_info = ViewQuestVQ110},
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index 3a162c6..e836e77 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -1061,7 +1061,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x041e, 0x401a), .driver_info = Rev072A},
 	{USB_DEVICE(0x041e, 0x403b), .driver_info = Rev012A},
 	{USB_DEVICE(0x0458, 0x7004), .driver_info = Rev072A},
diff --git a/drivers/media/video/gspca/sq905.c b/drivers/media/video/gspca/sq905.c
index 4040677..2e9c061 100644
--- a/drivers/media/video/gspca/sq905.c
+++ b/drivers/media/video/gspca/sq905.c
@@ -396,7 +396,7 @@
 }
 
 /* Table of supported USB devices */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x2770, 0x9120)},
 	{}
 };
diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c
index 8ba1995..457563b 100644
--- a/drivers/media/video/gspca/sq905c.c
+++ b/drivers/media/video/gspca/sq905c.c
@@ -298,7 +298,7 @@
 }
 
 /* Table of supported USB devices */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x2770, 0x905c)},
 	{USB_DEVICE(0x2770, 0x9050)},
 	{USB_DEVICE(0x2770, 0x9051)},
diff --git a/drivers/media/video/gspca/sq930x.c b/drivers/media/video/gspca/sq930x.c
index a4a9881..8215d5d 100644
--- a/drivers/media/video/gspca/sq930x.c
+++ b/drivers/media/video/gspca/sq930x.c
@@ -1163,7 +1163,7 @@
 #define ST(sensor, type) \
 	.driver_info = (SENSOR_ ## sensor << 8) \
 			| (type)
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x041e, 0x4038), ST(MI0360, 0)},
 	{USB_DEVICE(0x041e, 0x403c), ST(LZ24BP, 0)},
 	{USB_DEVICE(0x041e, 0x403d), ST(LZ24BP, 0)},
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index 11a192b..87be52b 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -495,7 +495,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x05e1, 0x0893)},
 	{}
 };
diff --git a/drivers/media/video/gspca/stv0680.c b/drivers/media/video/gspca/stv0680.c
index b199ad4..e2ef41c 100644
--- a/drivers/media/video/gspca/stv0680.c
+++ b/drivers/media/video/gspca/stv0680.c
@@ -327,7 +327,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x0553, 0x0202)},
 	{USB_DEVICE(0x041e, 0x4007)},
 	{}
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index 28ea417..7e066142 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -564,7 +564,7 @@
 
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	/* QuickCam Express */
 	{USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 },
 	/* LEGO cam / QuickCam Web */
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index a9cbcd6..543542a 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -1162,7 +1162,7 @@
 #define BS(bridge, subtype) \
 	.driver_info = (BRIDGE_ ## bridge << 8) \
 			| (subtype)
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x041e, 0x400b), BS(SPCA504C, 0)},
 	{USB_DEVICE(0x041e, 0x4012), BS(SPCA504C, 0)},
 	{USB_DEVICE(0x041e, 0x4013), BS(SPCA504C, 0)},
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index 8f0c331..a3eccd8 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -1416,7 +1416,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x17a1, 0x0128)},
 	{}
 };
diff --git a/drivers/media/video/gspca/tv8532.c b/drivers/media/video/gspca/tv8532.c
index 38c22f0..933ef2c 100644
--- a/drivers/media/video/gspca/tv8532.c
+++ b/drivers/media/video/gspca/tv8532.c
@@ -388,7 +388,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x046d, 0x0920)},
 	{USB_DEVICE(0x046d, 0x0921)},
 	{USB_DEVICE(0x0545, 0x808b)},
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index 9b2ae1b..6caed73 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -4192,7 +4192,7 @@
 #define BF(bridge, flags) \
 	.driver_info = (BRIDGE_ ## bridge << 8) \
 		| (flags)
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x041e, 0x405b), BF(VC0323, FL_VFLIP)},
 	{USB_DEVICE(0x046d, 0x0892), BF(VC0321, 0)},
 	{USB_DEVICE(0x046d, 0x0896), BF(VC0321, 0)},
diff --git a/drivers/media/video/gspca/xirlink_cit.c b/drivers/media/video/gspca/xirlink_cit.c
index 5b5039a..c089a0f 100644
--- a/drivers/media/video/gspca/xirlink_cit.c
+++ b/drivers/media/video/gspca/xirlink_cit.c
@@ -3270,7 +3270,7 @@
 };
 
 /* -- module initialisation -- */
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{ USB_DEVICE_VER(0x0545, 0x8080, 0x0001, 0x0001), .driver_info = CIT_MODEL0 },
 	{ USB_DEVICE_VER(0x0545, 0x8080, 0x0002, 0x0002), .driver_info = CIT_MODEL1 },
 	{ USB_DEVICE_VER(0x0545, 0x8080, 0x030a, 0x030a), .driver_info = CIT_MODEL2 },
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index 14b85d4..47236a5 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -5793,7 +5793,7 @@
 			break;
 		default:
 /*		case 0xdd:	 * delay */
-			msleep(action->val / 64 + 10);
+			msleep(action->idx);
 			break;
 		}
 		action++;
@@ -5830,7 +5830,7 @@
 		[SENSOR_GC0305] =	gc0305_matrix,
 		[SENSOR_HDCS2020b] =	NULL,
 		[SENSOR_HV7131B] =	NULL,
-		[SENSOR_HV7131R] =	NULL,
+		[SENSOR_HV7131R] =	po2030_matrix,
 		[SENSOR_ICM105A] =	po2030_matrix,
 		[SENSOR_MC501CB] =	NULL,
 		[SENSOR_MT9V111_1] =	gc0305_matrix,
@@ -5936,6 +5936,7 @@
 	case SENSOR_ADCM2700:
 	case SENSOR_GC0305:
 	case SENSOR_HV7131B:
+	case SENSOR_HV7131R:
 	case SENSOR_OV7620:
 	case SENSOR_PAS202B:
 	case SENSOR_PO2030:
@@ -6108,11 +6109,13 @@
 		reg_w(gspca_dev, 0x02, 0x003b);
 		reg_w(gspca_dev, 0x00, 0x0038);
 		break;
+	case SENSOR_HV7131R:
 	case SENSOR_PAS202B:
 		reg_w(gspca_dev, 0x03, 0x003b);
 		reg_w(gspca_dev, 0x0c, 0x003a);
 		reg_w(gspca_dev, 0x0b, 0x0039);
-		reg_w(gspca_dev, 0x0b, 0x0038);
+		if (sensor == SENSOR_PAS202B)
+			reg_w(gspca_dev, 0x0b, 0x0038);
 		break;
 	}
 }
@@ -6704,10 +6707,13 @@
 		reg_w(gspca_dev, 0x02, 0x003b);
 		reg_w(gspca_dev, 0x00, 0x0038);
 		break;
+	case SENSOR_HV7131R:
 	case SENSOR_PAS202B:
 		reg_w(gspca_dev, 0x03, 0x003b);
 		reg_w(gspca_dev, 0x0c, 0x003a);
 		reg_w(gspca_dev, 0x0b, 0x0039);
+		if (sd->sensor == SENSOR_HV7131R)
+			reg_w(gspca_dev, 0x50, ZC3XX_R11D_GLOBALGAIN);
 		break;
 	}
 
@@ -6720,6 +6726,7 @@
 		break;
 	case SENSOR_PAS202B:
 	case SENSOR_GC0305:
+	case SENSOR_HV7131R:
 	case SENSOR_TAS5130C:
 		reg_r(gspca_dev, 0x0008);
 		/* fall thru */
@@ -6760,6 +6767,12 @@
 						/* ms-win + */
 		reg_w(gspca_dev, 0x40, 0x0117);
 		break;
+	case SENSOR_HV7131R:
+		i2c_write(gspca_dev, 0x25, 0x04, 0x00);	/* exposure */
+		i2c_write(gspca_dev, 0x26, 0x93, 0x00);
+		i2c_write(gspca_dev, 0x27, 0xe0, 0x00);
+		reg_w(gspca_dev, 0x00, ZC3XX_R1A7_CALCGLOBALMEAN);
+		break;
 	case SENSOR_GC0305:
 	case SENSOR_TAS5130C:
 		reg_w(gspca_dev, 0x09, 0x01ad);	/* (from win traces) */
@@ -6808,9 +6821,17 @@
 {
 	struct sd *sd = (struct sd *) gspca_dev;
 
-	if (data[0] == 0xff && data[1] == 0xd8) {	/* start of frame */
+	/* check the JPEG end of frame */
+	if (len >= 3
+	 && data[len - 3] == 0xff && data[len - 2] == 0xd9) {
+/*fixme: what does the last byte mean?*/
 		gspca_frame_add(gspca_dev, LAST_PACKET,
-					NULL, 0);
+					data, len - 1);
+		return;
+	}
+
+	/* check the JPEG start of a frame */
+	if (data[0] == 0xff && data[1] == 0xd8) {
 		/* put the JPEG header in the new frame */
 		gspca_frame_add(gspca_dev, FIRST_PACKET,
 			sd->jpeg_hdr, JPEG_HDR_SZ);
@@ -6909,7 +6930,7 @@
 #endif
 };
 
-static const __devinitdata struct usb_device_id device_table[] = {
+static const struct usb_device_id device_table[] = {
 	{USB_DEVICE(0x041e, 0x041e)},
 	{USB_DEVICE(0x041e, 0x4017)},
 	{USB_DEVICE(0x041e, 0x401c), .driver_info = SENSOR_PAS106},
diff --git a/drivers/media/video/hdpvr/Makefile b/drivers/media/video/hdpvr/Makefile
index e0230fc..3baa9f6 100644
--- a/drivers/media/video/hdpvr/Makefile
+++ b/drivers/media/video/hdpvr/Makefile
@@ -1,6 +1,4 @@
-hdpvr-objs	:= hdpvr-control.o hdpvr-core.o hdpvr-video.o
-
-hdpvr-$(CONFIG_I2C) += hdpvr-i2c.o
+hdpvr-objs	:= hdpvr-control.o hdpvr-core.o hdpvr-video.o hdpvr-i2c.o
 
 obj-$(CONFIG_VIDEO_HDPVR) += hdpvr.o
 
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index f7d1ee5..a27d93b 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -283,6 +283,7 @@
 	struct hdpvr_device *dev;
 	struct usb_host_interface *iface_desc;
 	struct usb_endpoint_descriptor *endpoint;
+	struct i2c_client *client;
 	size_t buffer_size;
 	int i;
 	int retval = -ENOMEM;
@@ -378,25 +379,35 @@
 		goto error;
 	}
 
-#ifdef CONFIG_I2C
-	/* until i2c is working properly */
-	retval = 0; /* hdpvr_register_i2c_adapter(dev); */
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	retval = hdpvr_register_i2c_adapter(dev);
 	if (retval < 0) {
-		v4l2_err(&dev->v4l2_dev, "registering i2c adapter failed\n");
+		v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n");
 		goto error;
 	}
 
-	/* until i2c is working properly */
-	retval = 0; /* hdpvr_register_i2c_ir(dev); */
-	if (retval < 0)
-		v4l2_err(&dev->v4l2_dev, "registering i2c IR devices failed\n");
-#endif /* CONFIG_I2C */
+	client = hdpvr_register_ir_rx_i2c(dev);
+	if (!client) {
+		v4l2_err(&dev->v4l2_dev, "i2c IR RX device register failed\n");
+		goto reg_fail;
+	}
+
+	client = hdpvr_register_ir_tx_i2c(dev);
+	if (!client) {
+		v4l2_err(&dev->v4l2_dev, "i2c IR TX device register failed\n");
+		goto reg_fail;
+	}
+#endif
 
 	/* let the user know what node this device is now attached to */
 	v4l2_info(&dev->v4l2_dev, "device now attached to %s\n",
 		  video_device_node_name(dev->video_dev));
 	return 0;
 
+reg_fail:
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	i2c_del_adapter(&dev->i2c_adapter);
+#endif
 error:
 	if (dev) {
 		/* Destroy single thread */
@@ -426,6 +437,9 @@
 	mutex_lock(&dev->io_mutex);
 	hdpvr_cancel_queue(dev);
 	mutex_unlock(&dev->io_mutex);
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	i2c_del_adapter(&dev->i2c_adapter);
+#endif
 	video_unregister_device(dev->video_dev);
 	atomic_dec(&dev_nr);
 }
diff --git a/drivers/media/video/hdpvr/hdpvr-i2c.c b/drivers/media/video/hdpvr/hdpvr-i2c.c
index 24966aa..e53fa55 100644
--- a/drivers/media/video/hdpvr/hdpvr-i2c.c
+++ b/drivers/media/video/hdpvr/hdpvr-i2c.c
@@ -13,6 +13,8 @@
  *
  */
 
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+
 #include <linux/i2c.h>
 #include <linux/slab.h>
 
@@ -28,106 +30,86 @@
 #define Z8F0811_IR_TX_I2C_ADDR	0x70
 #define Z8F0811_IR_RX_I2C_ADDR	0x71
 
-static const u8 ir_i2c_addrs[] = {
-	Z8F0811_IR_TX_I2C_ADDR,
-	Z8F0811_IR_RX_I2C_ADDR,
-};
 
-static const char * const ir_devicenames[] = {
-	"ir_tx_z8f0811_hdpvr",
-	"ir_rx_z8f0811_hdpvr",
-};
-
-static int hdpvr_new_i2c_ir(struct hdpvr_device *dev, struct i2c_adapter *adap,
-			    const char *type, u8 addr)
+struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev)
 {
-	struct i2c_board_info info;
 	struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
-	unsigned short addr_list[2] = { addr, I2C_CLIENT_END };
+	struct i2c_board_info hdpvr_ir_tx_i2c_board_info = {
+		I2C_BOARD_INFO("ir_tx_z8f0811_hdpvr", Z8F0811_IR_TX_I2C_ADDR),
+	};
 
-	memset(&info, 0, sizeof(struct i2c_board_info));
-	strlcpy(info.type, type, I2C_NAME_SIZE);
+	init_data->name = "HD-PVR";
+	hdpvr_ir_tx_i2c_board_info.platform_data = init_data;
+
+	return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_tx_i2c_board_info);
+}
+
+struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev)
+{
+	struct IR_i2c_init_data *init_data = &dev->ir_i2c_init_data;
+	struct i2c_board_info hdpvr_ir_rx_i2c_board_info = {
+		I2C_BOARD_INFO("ir_rx_z8f0811_hdpvr", Z8F0811_IR_RX_I2C_ADDR),
+	};
 
 	/* Our default information for ir-kbd-i2c.c to use */
-	switch (addr) {
-	case Z8F0811_IR_RX_I2C_ADDR:
-		init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
-		init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
-		init_data->type = RC_TYPE_RC5;
-		init_data->name = "HD PVR";
-		info.platform_data = init_data;
-		break;
-	}
+	init_data->ir_codes = RC_MAP_HAUPPAUGE_NEW;
+	init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
+	init_data->type = RC_TYPE_RC5;
+	init_data->name = "HD-PVR";
+	hdpvr_ir_rx_i2c_board_info.platform_data = init_data;
 
-	return i2c_new_probed_device(adap, &info, addr_list, NULL) == NULL ?
-	       -1 : 0;
+	return i2c_new_device(&dev->i2c_adapter, &hdpvr_ir_rx_i2c_board_info);
 }
 
-int hdpvr_register_i2c_ir(struct hdpvr_device *dev)
-{
-	int i;
-	int ret = 0;
-
-	for (i = 0; i < ARRAY_SIZE(ir_i2c_addrs); i++)
-		ret += hdpvr_new_i2c_ir(dev, dev->i2c_adapter,
-					ir_devicenames[i], ir_i2c_addrs[i]);
-
-	return ret;
-}
-
-static int hdpvr_i2c_read(struct hdpvr_device *dev, unsigned char addr,
-			  char *data, int len)
+static int hdpvr_i2c_read(struct hdpvr_device *dev, int bus,
+			  unsigned char addr, char *data, int len)
 {
 	int ret;
-	char *buf = kmalloc(len, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
+
+	if (len > sizeof(dev->i2c_buf))
+		return -EINVAL;
 
 	ret = usb_control_msg(dev->udev,
 			      usb_rcvctrlpipe(dev->udev, 0),
 			      REQTYPE_I2C_READ, CTRL_READ_REQUEST,
-			      0x100|addr, 0, buf, len, 1000);
+			      (bus << 8) | addr, 0, &dev->i2c_buf, len, 1000);
 
 	if (ret == len) {
-		memcpy(data, buf, len);
+		memcpy(data, &dev->i2c_buf, len);
 		ret = 0;
 	} else if (ret >= 0)
 		ret = -EIO;
 
-	kfree(buf);
-
 	return ret;
 }
 
-static int hdpvr_i2c_write(struct hdpvr_device *dev, unsigned char addr,
-			   char *data, int len)
+static int hdpvr_i2c_write(struct hdpvr_device *dev, int bus,
+			   unsigned char addr, char *data, int len)
 {
 	int ret;
-	char *buf = kmalloc(len, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
 
-	memcpy(buf, data, len);
+	if (len > sizeof(dev->i2c_buf))
+		return -EINVAL;
+
+	memcpy(&dev->i2c_buf, data, len);
 	ret = usb_control_msg(dev->udev,
 			      usb_sndctrlpipe(dev->udev, 0),
 			      REQTYPE_I2C_WRITE, CTRL_WRITE_REQUEST,
-			      0x100|addr, 0, buf, len, 1000);
+			      (bus << 8) | addr, 0, &dev->i2c_buf, len, 1000);
 
 	if (ret < 0)
-		goto error;
+		return ret;
 
 	ret = usb_control_msg(dev->udev,
 			      usb_rcvctrlpipe(dev->udev, 0),
 			      REQTYPE_I2C_WRITE_STATT, CTRL_READ_REQUEST,
-			      0, 0, buf, 2, 1000);
+			      0, 0, &dev->i2c_buf, 2, 1000);
 
-	if (ret == 2)
+	if ((ret == 2) && (dev->i2c_buf[1] == (len - 1)))
 		ret = 0;
 	else if (ret >= 0)
 		ret = -EIO;
 
-error:
-	kfree(buf);
 	return ret;
 }
 
@@ -146,10 +128,10 @@
 		addr = msgs[i].addr << 1;
 
 		if (msgs[i].flags & I2C_M_RD)
-			retval = hdpvr_i2c_read(dev, addr, msgs[i].buf,
+			retval = hdpvr_i2c_read(dev, 1, addr, msgs[i].buf,
 						msgs[i].len);
 		else
-			retval = hdpvr_i2c_write(dev, addr, msgs[i].buf,
+			retval = hdpvr_i2c_write(dev, 1, addr, msgs[i].buf,
 						 msgs[i].len);
 	}
 
@@ -168,30 +150,47 @@
 	.functionality = hdpvr_functionality,
 };
 
+static struct i2c_adapter hdpvr_i2c_adapter_template = {
+	.name   = "Hauppage HD PVR I2C",
+	.owner  = THIS_MODULE,
+	.algo   = &hdpvr_algo,
+};
+
+static int hdpvr_activate_ir(struct hdpvr_device *dev)
+{
+	char buffer[8];
+
+	mutex_lock(&dev->i2c_mutex);
+
+	hdpvr_i2c_read(dev, 0, 0x54, buffer, 1);
+
+	buffer[0] = 0;
+	buffer[1] = 0x8;
+	hdpvr_i2c_write(dev, 1, 0x54, buffer, 2);
+
+	buffer[1] = 0x18;
+	hdpvr_i2c_write(dev, 1, 0x54, buffer, 2);
+
+	mutex_unlock(&dev->i2c_mutex);
+
+	return 0;
+}
+
 int hdpvr_register_i2c_adapter(struct hdpvr_device *dev)
 {
-	struct i2c_adapter *i2c_adap;
 	int retval = -ENOMEM;
 
-	i2c_adap = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL);
-	if (i2c_adap == NULL)
-		goto error;
+	hdpvr_activate_ir(dev);
 
-	strlcpy(i2c_adap->name, "Hauppauge HD PVR I2C",
-		sizeof(i2c_adap->name));
-	i2c_adap->algo  = &hdpvr_algo;
-	i2c_adap->owner = THIS_MODULE;
-	i2c_adap->dev.parent = &dev->udev->dev;
+	memcpy(&dev->i2c_adapter, &hdpvr_i2c_adapter_template,
+	       sizeof(struct i2c_adapter));
+	dev->i2c_adapter.dev.parent = &dev->udev->dev;
 
-	i2c_set_adapdata(i2c_adap, dev);
+	i2c_set_adapdata(&dev->i2c_adapter, dev);
 
-	retval = i2c_add_adapter(i2c_adap);
+	retval = i2c_add_adapter(&dev->i2c_adapter);
 
-	if (!retval)
-		dev->i2c_adapter = i2c_adap;
-	else
-		kfree(i2c_adap);
-
-error:
 	return retval;
 }
+
+#endif
diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c
index d38fe10..514aea7 100644
--- a/drivers/media/video/hdpvr/hdpvr-video.c
+++ b/drivers/media/video/hdpvr/hdpvr-video.c
@@ -1220,12 +1220,9 @@
 	v4l2_device_unregister(&dev->v4l2_dev);
 
 	/* deregister I2C adapter */
-#ifdef CONFIG_I2C
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
 	mutex_lock(&dev->i2c_mutex);
-	if (dev->i2c_adapter)
-		i2c_del_adapter(dev->i2c_adapter);
-	kfree(dev->i2c_adapter);
-	dev->i2c_adapter = NULL;
+	i2c_del_adapter(&dev->i2c_adapter);
 	mutex_unlock(&dev->i2c_mutex);
 #endif /* CONFIG_I2C */
 
diff --git a/drivers/media/video/hdpvr/hdpvr.h b/drivers/media/video/hdpvr/hdpvr.h
index 37f1e4c..072f23c 100644
--- a/drivers/media/video/hdpvr/hdpvr.h
+++ b/drivers/media/video/hdpvr/hdpvr.h
@@ -25,6 +25,7 @@
 	KERNEL_VERSION(HDPVR_MAJOR_VERSION, HDPVR_MINOR_VERSION, HDPVR_RELEASE)
 
 #define HDPVR_MAX 8
+#define HDPVR_I2C_MAX_SIZE 128
 
 /* Define these values to match your devices */
 #define HD_PVR_VENDOR_ID	0x2040
@@ -106,9 +107,11 @@
 	struct work_struct	worker;
 
 	/* I2C adapter */
-	struct i2c_adapter	*i2c_adapter;
+	struct i2c_adapter	i2c_adapter;
 	/* I2C lock */
 	struct mutex		i2c_mutex;
+	/* I2C message buffer space */
+	char			i2c_buf[HDPVR_I2C_MAX_SIZE];
 
 	/* For passing data to ir-kbd-i2c */
 	struct IR_i2c_init_data	ir_i2c_init_data;
@@ -310,7 +313,8 @@
 /* i2c adapter registration */
 int hdpvr_register_i2c_adapter(struct hdpvr_device *dev);
 
-int hdpvr_register_i2c_ir(struct hdpvr_device *dev);
+struct i2c_client *hdpvr_register_ir_rx_i2c(struct hdpvr_device *dev);
+struct i2c_client *hdpvr_register_ir_tx_i2c(struct hdpvr_device *dev);
 
 /*========================================================================*/
 /* buffer management */
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index c87b6bc..a221ad6 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -128,6 +128,19 @@
 
 static int get_key_haup_xvr(struct IR_i2c *ir, u32 *ir_key, u32 *ir_raw)
 {
+	int ret;
+	unsigned char buf[1] = { 0 };
+
+	/*
+	 * This is the same apparent "are you ready?" poll command observed
+	 * watching Windows driver traffic and implemented in lirc_zilog. With
+	 * this added, we get far saner remote behavior with z8 chips on usb
+	 * connected devices, even with the default polling interval of 100ms.
+	 */
+	ret = i2c_master_send(ir->c, buf, 1);
+	if (ret != 1)
+		return (ret < 0) ? ret : -EINVAL;
+
 	return get_key_haup_common (ir, ir_key, ir_raw, 6, 3);
 }
 
@@ -244,15 +257,17 @@
 	static u32 ir_key, ir_raw;
 	int rc;
 
-	dprintk(2,"ir_poll_key\n");
+	dprintk(3, "%s\n", __func__);
 	rc = ir->get_key(ir, &ir_key, &ir_raw);
 	if (rc < 0) {
 		dprintk(2,"error\n");
 		return;
 	}
 
-	if (rc)
+	if (rc) {
+		dprintk(1, "%s: keycode = 0x%04x\n", __func__, ir_key);
 		rc_keydown(ir->rc, ir_key, 0);
+	}
 }
 
 static void ir_work(struct work_struct *work)
@@ -321,6 +336,12 @@
 		rc_type     = RC_TYPE_OTHER;
 		ir_codes    = RC_MAP_AVERMEDIA_CARDBUS;
 		break;
+	case 0x71:
+		name        = "Hauppauge/Zilog Z8";
+		ir->get_key = get_key_haup_xvr;
+		rc_type     = RC_TYPE_RC5;
+		ir_codes    = hauppauge ? RC_MAP_HAUPPAUGE_NEW : RC_MAP_RC5_TV;
+		break;
 	}
 
 	/* Let the caller override settings */
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index e103b8f..9fb86a0 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -300,10 +300,15 @@
 				adap, type, 0, I2C_ADDRS(hw_addrs[idx]));
 	} else if (hw == IVTV_HW_CX25840) {
 		struct cx25840_platform_data pdata;
+		struct i2c_board_info cx25840_info = {
+			.type = "cx25840",
+			.addr = hw_addrs[idx],
+			.platform_data = &pdata,
+		};
 
 		pdata.pvr150_workaround = itv->pvr150_workaround;
-		sd = v4l2_i2c_new_subdev_cfg(&itv->v4l2_dev,
-				adap, type, 0, &pdata, hw_addrs[idx], NULL);
+		sd = v4l2_i2c_new_subdev_board(&itv->v4l2_dev, adap,
+				&cx25840_info, NULL);
 	} else {
 		sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
 				adap, type, hw_addrs[idx], NULL);
diff --git a/drivers/media/video/mt9v011.c b/drivers/media/video/mt9v011.c
index 209ff97..4904d25 100644
--- a/drivers/media/video/mt9v011.c
+++ b/drivers/media/video/mt9v011.c
@@ -12,17 +12,41 @@
 #include <asm/div64.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-chip-ident.h>
-#include "mt9v011.h"
+#include <media/mt9v011.h>
 
 MODULE_DESCRIPTION("Micron mt9v011 sensor driver");
 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
 MODULE_LICENSE("GPL");
 
-
 static int debug;
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0-2)");
 
+#define R00_MT9V011_CHIP_VERSION	0x00
+#define R01_MT9V011_ROWSTART		0x01
+#define R02_MT9V011_COLSTART		0x02
+#define R03_MT9V011_HEIGHT		0x03
+#define R04_MT9V011_WIDTH		0x04
+#define R05_MT9V011_HBLANK		0x05
+#define R06_MT9V011_VBLANK		0x06
+#define R07_MT9V011_OUT_CTRL		0x07
+#define R09_MT9V011_SHUTTER_WIDTH	0x09
+#define R0A_MT9V011_CLK_SPEED		0x0a
+#define R0B_MT9V011_RESTART		0x0b
+#define R0C_MT9V011_SHUTTER_DELAY	0x0c
+#define R0D_MT9V011_RESET		0x0d
+#define R1E_MT9V011_DIGITAL_ZOOM	0x1e
+#define R20_MT9V011_READ_MODE		0x20
+#define R2B_MT9V011_GREEN_1_GAIN	0x2b
+#define R2C_MT9V011_BLUE_GAIN		0x2c
+#define R2D_MT9V011_RED_GAIN		0x2d
+#define R2E_MT9V011_GREEN_2_GAIN	0x2e
+#define R35_MT9V011_GLOBAL_GAIN		0x35
+#define RF1_MT9V011_CHIP_ENABLE		0xf1
+
+#define MT9V011_VERSION			0x8232
+#define MT9V011_REV_B_VERSION		0x8243
+
 /* supported controls */
 static struct v4l2_queryctrl mt9v011_qctrl[] = {
 	{
@@ -469,23 +493,6 @@
 	return 0;
 }
 
-static int mt9v011_s_config(struct v4l2_subdev *sd, int dumb, void *data)
-{
-	struct mt9v011 *core = to_mt9v011(sd);
-	unsigned *xtal = data;
-
-	v4l2_dbg(1, debug, sd, "s_config called\n");
-
-	if (xtal) {
-		core->xtal = *xtal;
-		v4l2_dbg(1, debug, sd, "xtal set to %d.%03d MHz\n",
-			 *xtal / 1000000, (*xtal / 1000) % 1000);
-	}
-
-	return 0;
-}
-
-
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 static int mt9v011_g_register(struct v4l2_subdev *sd,
 			      struct v4l2_dbg_register *reg)
@@ -536,7 +543,6 @@
 	.g_ctrl = mt9v011_g_ctrl,
 	.s_ctrl = mt9v011_s_ctrl,
 	.reset = mt9v011_reset,
-	.s_config = mt9v011_s_config,
 	.g_chip_ident = mt9v011_g_chip_ident,
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 	.g_register = mt9v011_g_register,
@@ -596,6 +602,14 @@
 	core->height = 480;
 	core->xtal = 27000000;	/* Hz */
 
+	if (c->dev.platform_data) {
+		struct mt9v011_platform_data *pdata = c->dev.platform_data;
+
+		core->xtal = pdata->xtal;
+		v4l2_dbg(1, debug, sd, "xtal set to %d.%03d MHz\n",
+			core->xtal / 1000000, (core->xtal / 1000) % 1000);
+	}
+
 	v4l_info(c, "chip found @ 0x%02x (%s - chip version 0x%04x)\n",
 		 c->addr << 1, c->adapter->name, version);
 
diff --git a/drivers/media/video/mt9v011.h b/drivers/media/video/mt9v011.h
deleted file mode 100644
index 3350fd6..0000000
--- a/drivers/media/video/mt9v011.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * mt9v011 -Micron 1/4-Inch VGA Digital Image Sensor
- *
- * Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
- * This code is placed under the terms of the GNU General Public License v2
- */
-
-#ifndef MT9V011_H_
-#define MT9V011_H_
-
-#define R00_MT9V011_CHIP_VERSION	0x00
-#define R01_MT9V011_ROWSTART		0x01
-#define R02_MT9V011_COLSTART		0x02
-#define R03_MT9V011_HEIGHT		0x03
-#define R04_MT9V011_WIDTH		0x04
-#define R05_MT9V011_HBLANK		0x05
-#define R06_MT9V011_VBLANK		0x06
-#define R07_MT9V011_OUT_CTRL		0x07
-#define R09_MT9V011_SHUTTER_WIDTH	0x09
-#define R0A_MT9V011_CLK_SPEED		0x0a
-#define R0B_MT9V011_RESTART		0x0b
-#define R0C_MT9V011_SHUTTER_DELAY	0x0c
-#define R0D_MT9V011_RESET		0x0d
-#define R1E_MT9V011_DIGITAL_ZOOM	0x1e
-#define R20_MT9V011_READ_MODE		0x20
-#define R2B_MT9V011_GREEN_1_GAIN	0x2b
-#define R2C_MT9V011_BLUE_GAIN		0x2c
-#define R2D_MT9V011_RED_GAIN		0x2d
-#define R2E_MT9V011_GREEN_2_GAIN	0x2e
-#define R35_MT9V011_GLOBAL_GAIN		0x35
-#define RF1_MT9V011_CHIP_ENABLE		0xf1
-
-#define MT9V011_VERSION			0x8232
-#define MT9V011_REV_B_VERSION		0x8243
-
-#endif
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index c881a64..d4e7c11 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -1449,47 +1449,6 @@
 	return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_OV7670, 0);
 }
 
-static int ov7670_s_config(struct v4l2_subdev *sd, int dumb, void *data)
-{
-	struct i2c_client *client = v4l2_get_subdevdata(sd);
-	struct ov7670_config *config = data;
-	struct ov7670_info *info = to_state(sd);
-	int ret;
-
-	info->clock_speed = 30; /* default: a guess */
-
-	/*
-	 * Must apply configuration before initializing device, because it
-	 * selects I/O method.
-	 */
-	if (config) {
-		info->min_width = config->min_width;
-		info->min_height = config->min_height;
-		info->use_smbus = config->use_smbus;
-
-		if (config->clock_speed)
-			info->clock_speed = config->clock_speed;
-	}
-
-	/* Make sure it's an ov7670 */
-	ret = ov7670_detect(sd);
-	if (ret) {
-		v4l_dbg(1, debug, client,
-			"chip found @ 0x%x (%s) is not an ov7670 chip.\n",
-			client->addr << 1, client->adapter->name);
-		kfree(info);
-		return ret;
-	}
-	v4l_info(client, "chip found @ 0x%02x (%s)\n",
-			client->addr << 1, client->adapter->name);
-
-	info->fmt = &ov7670_formats[0];
-	info->sat = 128;	/* Review this */
-	info->clkrc = info->clock_speed / 30;
-
-	return 0;
-}
-
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 static int ov7670_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
 {
@@ -1528,7 +1487,6 @@
 	.s_ctrl = ov7670_s_ctrl,
 	.queryctrl = ov7670_queryctrl,
 	.reset = ov7670_reset,
-	.s_config = ov7670_s_config,
 	.init = ov7670_init,
 #ifdef CONFIG_VIDEO_ADV_DEBUG
 	.g_register = ov7670_g_register,
@@ -1558,6 +1516,7 @@
 {
 	struct v4l2_subdev *sd;
 	struct ov7670_info *info;
+	int ret;
 
 	info = kzalloc(sizeof(struct ov7670_info), GFP_KERNEL);
 	if (info == NULL)
@@ -1565,6 +1524,37 @@
 	sd = &info->sd;
 	v4l2_i2c_subdev_init(sd, client, &ov7670_ops);
 
+	info->clock_speed = 30; /* default: a guess */
+	if (client->dev.platform_data) {
+		struct ov7670_config *config = client->dev.platform_data;
+
+		/*
+		 * Must apply configuration before initializing device, because it
+		 * selects I/O method.
+		 */
+		info->min_width = config->min_width;
+		info->min_height = config->min_height;
+		info->use_smbus = config->use_smbus;
+
+		if (config->clock_speed)
+			info->clock_speed = config->clock_speed;
+	}
+
+	/* Make sure it's an ov7670 */
+	ret = ov7670_detect(sd);
+	if (ret) {
+		v4l_dbg(1, debug, client,
+			"chip found @ 0x%x (%s) is not an ov7670 chip.\n",
+			client->addr << 1, client->adapter->name);
+		kfree(info);
+		return ret;
+	}
+	v4l_info(client, "chip found @ 0x%02x (%s)\n",
+			client->addr << 1, client->adapter->name);
+
+	info->fmt = &ov7670_formats[0];
+	info->sat = 128;	/* Review this */
+	info->clkrc = info->clock_speed / 30;
 	return 0;
 }
 
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
index ac94a8b..305e6aa 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
@@ -40,6 +40,7 @@
 #include "pvrusb2-io.h"
 #include <media/v4l2-device.h>
 #include <media/cx2341x.h>
+#include <media/ir-kbd-i2c.h>
 #include "pvrusb2-devattr.h"
 
 /* Legal values for PVR2_CID_HSM */
@@ -202,6 +203,7 @@
 
 	/* IR related */
 	unsigned int ir_scheme_active; /* IR scheme as seen from the outside */
+	struct IR_i2c_init_data ir_init_data; /* params passed to IR modules */
 
 	/* Frequency table */
 	unsigned int freqTable[FREQTABLE_SIZE];
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
index 7cbe18c..451ecd4 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
@@ -19,6 +19,7 @@
  */
 
 #include <linux/i2c.h>
+#include <media/ir-kbd-i2c.h>
 #include "pvrusb2-i2c-core.h"
 #include "pvrusb2-hdw-internal.h"
 #include "pvrusb2-debug.h"
@@ -48,13 +49,6 @@
 MODULE_PARM_DESC(disable_autoload_ir_video,
 		 "1=do not try to autoload ir_video IR receiver");
 
-/* Mapping of IR schemes to known I2C addresses - if any */
-static const unsigned char ir_video_addresses[] = {
-	[PVR2_IR_SCHEME_ZILOG] = 0x71,
-	[PVR2_IR_SCHEME_29XXX] = 0x18,
-	[PVR2_IR_SCHEME_24XXX] = 0x18,
-};
-
 static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
 			  u8 i2c_addr,      /* I2C address we're talking to */
 			  u8 *data,         /* Data to write */
@@ -574,26 +568,55 @@
 static void pvr2_i2c_register_ir(struct pvr2_hdw *hdw)
 {
 	struct i2c_board_info info;
-	unsigned char addr = 0;
+	struct IR_i2c_init_data *init_data = &hdw->ir_init_data;
 	if (pvr2_disable_ir_video) {
 		pvr2_trace(PVR2_TRACE_INFO,
 			   "Automatic binding of ir_video has been disabled.");
 		return;
 	}
-	if (hdw->ir_scheme_active < ARRAY_SIZE(ir_video_addresses)) {
-		addr = ir_video_addresses[hdw->ir_scheme_active];
-	}
-	if (!addr) {
+	memset(&info, 0, sizeof(struct i2c_board_info));
+	switch (hdw->ir_scheme_active) {
+	case PVR2_IR_SCHEME_24XXX: /* FX2-controlled IR */
+	case PVR2_IR_SCHEME_29XXX: /* Original 29xxx device */
+		init_data->ir_codes              = RC_MAP_HAUPPAUGE_NEW;
+		init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP;
+		init_data->type                  = RC_TYPE_RC5;
+		init_data->name                  = hdw->hdw_desc->description;
+		init_data->polling_interval      = 100; /* ms, from ir-kbd-i2c */
+		/* IR Receiver */
+		info.addr          = 0x18;
+		info.platform_data = init_data;
+		strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
+		pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
+			   info.type, info.addr);
+		i2c_new_device(&hdw->i2c_adap, &info);
+		break;
+	case PVR2_IR_SCHEME_ZILOG:     /* HVR-1950 style */
+	case PVR2_IR_SCHEME_24XXX_MCE: /* 24xxx MCE device */
+		init_data->ir_codes              = RC_MAP_HAUPPAUGE_NEW;
+		init_data->internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
+		init_data->type                  = RC_TYPE_RC5;
+		init_data->name                  = hdw->hdw_desc->description;
+		/* IR Receiver */
+		info.addr          = 0x71;
+		info.platform_data = init_data;
+		strlcpy(info.type, "ir_rx_z8f0811_haup", I2C_NAME_SIZE);
+		pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
+			   info.type, info.addr);
+		i2c_new_device(&hdw->i2c_adap, &info);
+		/* IR Transmitter */
+		info.addr          = 0x70;
+		info.platform_data = init_data;
+		strlcpy(info.type, "ir_tx_z8f0811_haup", I2C_NAME_SIZE);
+		pvr2_trace(PVR2_TRACE_INFO, "Binding %s to i2c address 0x%02x.",
+			   info.type, info.addr);
+		i2c_new_device(&hdw->i2c_adap, &info);
+		break;
+	default:
 		/* The device either doesn't support I2C-based IR or we
 		   don't know (yet) how to operate IR on the device. */
-		return;
+		break;
 	}
-	pvr2_trace(PVR2_TRACE_INFO,
-		   "Binding ir_video to i2c address 0x%02x.", addr);
-	memset(&info, 0, sizeof(struct i2c_board_info));
-	strlcpy(info.type, "ir_video", I2C_NAME_SIZE);
-	info.addr = addr;
-	i2c_new_device(&hdw->i2c_adap, &info);
 }
 
 void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index f35459d..0db9092 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -1565,7 +1565,7 @@
 	chip_id = name[5];
 
 	/* Check whether this chip is part of the saa711x series */
-	if (memcmp(name, "1f711", 5)) {
+	if (memcmp(name + 1, "f711", 4)) {
 		v4l_dbg(1, debug, client, "chip found @ 0x%x (ID %s) does not match a known saa711x chip.\n",
 			client->addr << 1, name);
 		return -ENODEV;
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index e7aa588..deb8fcf 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -5179,18 +5179,8 @@
 	[SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG] = {
 		.name           = "Kworld PCI SBTVD/ISDB-T Full-Seg Hybrid",
 		.audio_clock    = 0x00187de7,
-#if 0
-	/*
-	 * FIXME: Analog mode doesn't work, if digital is enabled. The proper
-	 * fix is to use tda8290 driver, but Kworld seems to use an
-	 * unsupported version of tda8295.
-	 */
-		.tuner_type     = TUNER_NXP_TDA18271,	/* TUNER_PHILIPS_TDA8290 */
-		.tuner_addr     = 0x60,
-#else
-		.tuner_type     = UNSET,
+		.tuner_type     = TUNER_PHILIPS_TDA8290,
 		.tuner_addr     = ADDR_UNSET,
-#endif
 		.radio_type     = UNSET,
 		.radio_addr	= ADDR_UNSET,
 		.gpiomask       = 0x8e054000,
@@ -6932,10 +6922,17 @@
 	/* toggle AGC switch through GPIO 27 */
 	switch (mode) {
 	case TDA18271_ANALOG:
-		saa7134_set_gpio(dev, 27, 0);
+		saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x4000);
+		saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x4000);
+		msleep(20);
 		break;
 	case TDA18271_DIGITAL:
-		saa7134_set_gpio(dev, 27, 1);
+		saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x14000);
+		saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x14000);
+		msleep(20);
+		saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x54000);
+		saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x54000);
+		msleep(30);
 		break;
 	default:
 		return -EINVAL;
@@ -6993,6 +6990,7 @@
 int saa7134_tuner_callback(void *priv, int component, int command, int arg)
 {
 	struct saa7134_dev *dev = priv;
+
 	if (dev != NULL) {
 		switch (dev->tuner_type) {
 		case TUNER_PHILIPS_TDA8290:
@@ -7659,36 +7657,11 @@
 		break;
 	}
 	case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG:
-	{
-		struct i2c_msg msg = { .addr = 0x4b, .flags = 0 };
-		int i;
-		static u8 buffer[][2] = {
-			{0x30, 0x31},
-			{0xff, 0x00},
-			{0x41, 0x03},
-			{0x41, 0x1a},
-			{0xff, 0x02},
-			{0x34, 0x00},
-			{0x45, 0x97},
-			{0x45, 0xc1},
-		};
 		saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x4000);
 		saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x4000);
 
-		/*
-		 * FIXME: identify what device is at addr 0x4b and what means
-		 * this initialization
-		 */
-		for (i = 0; i < ARRAY_SIZE(buffer); i++) {
-			msg.buf = &buffer[i][0];
-			msg.len = ARRAY_SIZE(buffer[0]);
-			if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1)
-				printk(KERN_WARNING
-				       "%s: Unable to enable tuner(%i).\n",
-				       dev->name, i);
-		}
+		saa7134_set_gpio(dev, 27, 0);
 		break;
-	}
 	} /* switch() */
 
 	/* initialize tuner */
diff --git a/drivers/media/video/saa7134/saa7134-dvb.c b/drivers/media/video/saa7134/saa7134-dvb.c
index 3315a48..f65cad2 100644
--- a/drivers/media/video/saa7134/saa7134-dvb.c
+++ b/drivers/media/video/saa7134/saa7134-dvb.c
@@ -237,12 +237,39 @@
 static struct tda18271_config kworld_tda18271_config = {
 	.std_map = &mb86a20s_tda18271_std_map,
 	.gate    = TDA18271_GATE_DIGITAL,
+	.config  = 3,	/* Use tuner callback for AGC */
+
 };
 
 static const struct mb86a20s_config kworld_mb86a20s_config = {
 	.demod_address = 0x10,
 };
 
+static int kworld_sbtvd_gate_ctrl(struct dvb_frontend *fe, int enable)
+{
+	struct saa7134_dev *dev = fe->dvb->priv;
+
+	unsigned char initmsg[] = {0x45, 0x97};
+	unsigned char msg_enable[] = {0x45, 0xc1};
+	unsigned char msg_disable[] = {0x45, 0x81};
+	struct i2c_msg msg = {.addr = 0x4b, .flags = 0, .buf = initmsg, .len = 2};
+
+	if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
+		wprintk("could not access the I2C gate\n");
+		return -EIO;
+	}
+	if (enable)
+		msg.buf = msg_enable;
+	else
+		msg.buf = msg_disable;
+	if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
+		wprintk("could not access the I2C gate\n");
+		return -EIO;
+	}
+	msleep(20);
+	return 0;
+}
+
 /* ==================================================================
  * tda1004x based DVB-T cards, helper functions
  */
@@ -623,37 +650,6 @@
 
 /* ------------------------------------------------------------------ */
 
-static int __kworld_sbtvd_i2c_gate_ctrl(struct saa7134_dev *dev, int enable)
-{
-	unsigned char initmsg[] = {0x45, 0x97};
-	unsigned char msg_enable[] = {0x45, 0xc1};
-	unsigned char msg_disable[] = {0x45, 0x81};
-	struct i2c_msg msg = {.addr = 0x4b, .flags = 0, .buf = initmsg, .len = 2};
-
-	if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
-		wprintk("could not access the I2C gate\n");
-		return -EIO;
-	}
-	if (enable)
-		msg.buf = msg_enable;
-	else
-		msg.buf = msg_disable;
-	if (i2c_transfer(&dev->i2c_adap, &msg, 1) != 1) {
-		wprintk("could not access the I2C gate\n");
-		return -EIO;
-	}
-	msleep(20);
-	return 0;
-}
-static int kworld_sbtvd_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
-{
-	struct saa7134_dev *dev = fe->dvb->priv;
-
-	return __kworld_sbtvd_i2c_gate_ctrl(dev, enable);
-}
-
-/* ------------------------------------------------------------------ */
-
 static struct tda1004x_config tda827x_lifeview_config = {
 	.demod_address = 0x08,
 	.invert        = 1,
@@ -1660,27 +1656,23 @@
 		}
 		break;
 	case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG:
-		__kworld_sbtvd_i2c_gate_ctrl(dev, 0);
-		saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x14000);
-		saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x14000);
-		msleep(20);
-		saa_writel(SAA7134_GPIO_GPMODE0 >> 2, 0x54000);
-		saa_writel(SAA7134_GPIO_GPSTATUS0 >> 2, 0x54000);
-		msleep(20);
+		/* Switch to digital mode */
+		saa7134_tuner_callback(dev, 0,
+				       TDA18271_CALLBACK_CMD_AGC_ENABLE, 1);
 		fe0->dvb.frontend = dvb_attach(mb86a20s_attach,
 					       &kworld_mb86a20s_config,
 					       &dev->i2c_adap);
-		__kworld_sbtvd_i2c_gate_ctrl(dev, 1);
 		if (fe0->dvb.frontend != NULL) {
+			dvb_attach(tda829x_attach, fe0->dvb.frontend,
+				   &dev->i2c_adap, 0x4b,
+				   &tda829x_no_probe);
 			dvb_attach(tda18271_attach, fe0->dvb.frontend,
 				   0x60, &dev->i2c_adap,
 				   &kworld_tda18271_config);
-			/*
-			 * Only after success, it can initialize the gate, otherwise
-			 * an OOPS will hit, due to kfree(fe0->dvb.frontend)
-			 */
-			fe0->dvb.frontend->ops.i2c_gate_ctrl = kworld_sbtvd_i2c_gate_ctrl;
+			fe0->dvb.frontend->ops.i2c_gate_ctrl = kworld_sbtvd_gate_ctrl;
 		}
+
+		/* mb86a20s needs to use the I2C gateway */
 		break;
 	default:
 		wprintk("Huh? unknown DVB card?\n");
diff --git a/drivers/media/video/sn9c102/sn9c102_devtable.h b/drivers/media/video/sn9c102/sn9c102_devtable.h
index 41064c7..b3d2cc7 100644
--- a/drivers/media/video/sn9c102/sn9c102_devtable.h
+++ b/drivers/media/video/sn9c102/sn9c102_devtable.h
@@ -47,8 +47,8 @@
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6009, BRIDGE_SN9C102), },
 	{ SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), },
 /*	{ SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */
-#endif
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), },
+#endif
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), },
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), },
 #if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
@@ -56,78 +56,68 @@
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), },
 	{ SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), },
 #endif
-	{ SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), },
+	{ SN9C102_USB_DEVICE(0x0c45, 0x602b, BRIDGE_SN9C102), }, /* not in sonixb */
 #if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
 	{ SN9C102_USB_DEVICE(0x0c45, 0x602c, BRIDGE_SN9C102), },
 /*	{ SN9C102_USB_DEVICE(0x0c45, 0x602d, BRIDGE_SN9C102), }, HV7131R */
 	{ SN9C102_USB_DEVICE(0x0c45, 0x602e, BRIDGE_SN9C102), },
 #endif
-	{ SN9C102_USB_DEVICE(0x0c45, 0x6030, BRIDGE_SN9C102), },
+	{ SN9C102_USB_DEVICE(0x0c45, 0x6030, BRIDGE_SN9C102), }, /* not in sonixb */
 	/* SN9C103 */
-	{ SN9C102_USB_DEVICE(0x0c45, 0x6080, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x6082, BRIDGE_SN9C103), },
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x6080, BRIDGE_SN9C103), }, non existent ? */
+	{ SN9C102_USB_DEVICE(0x0c45, 0x6082, BRIDGE_SN9C103), }, /* not in sonixb */
+#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
 /*	{ SN9C102_USB_DEVICE(0x0c45, 0x6083, BRIDGE_SN9C103), }, HY7131D/E */
-	{ SN9C102_USB_DEVICE(0x0c45, 0x6088, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x608a, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x608b, BRIDGE_SN9C103), },
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x6088, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x608a, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x608b, BRIDGE_SN9C103), }, non existent ? */
 	{ SN9C102_USB_DEVICE(0x0c45, 0x608c, BRIDGE_SN9C103), },
 /*	{ SN9C102_USB_DEVICE(0x0c45, 0x608e, BRIDGE_SN9C103), }, CISVF10 */
-#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
 	{ SN9C102_USB_DEVICE(0x0c45, 0x608f, BRIDGE_SN9C103), },
-#endif
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60a0, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60a2, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60a3, BRIDGE_SN9C103), },
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60a0, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60a2, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60a3, BRIDGE_SN9C103), }, non existent ? */
 /*	{ SN9C102_USB_DEVICE(0x0c45, 0x60a8, BRIDGE_SN9C103), }, PAS106 */
 /*	{ SN9C102_USB_DEVICE(0x0c45, 0x60aa, BRIDGE_SN9C103), }, TAS5130 */
-/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60ab, BRIDGE_SN9C103), }, TAS5130 */
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60ac, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60ae, BRIDGE_SN9C103), },
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60ab, BRIDGE_SN9C103), }, TAS5110, non existent */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60ac, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60ae, BRIDGE_SN9C103), }, non existent ? */
 	{ SN9C102_USB_DEVICE(0x0c45, 0x60af, BRIDGE_SN9C103), },
-#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE
 	{ SN9C102_USB_DEVICE(0x0c45, 0x60b0, BRIDGE_SN9C103), },
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60b2, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60b3, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60b8, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60ba, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60bb, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60bc, BRIDGE_SN9C103), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60be, BRIDGE_SN9C103), }, non existent ? */
 #endif
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60b2, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60b3, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60b8, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60ba, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60bb, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60bc, BRIDGE_SN9C103), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60be, BRIDGE_SN9C103), },
 	/* SN9C105 */
 #if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
 	{ SN9C102_USB_DEVICE(0x045e, 0x00f5, BRIDGE_SN9C105), },
 	{ SN9C102_USB_DEVICE(0x045e, 0x00f7, BRIDGE_SN9C105), },
 	{ SN9C102_USB_DEVICE(0x0471, 0x0327, BRIDGE_SN9C105), },
 	{ SN9C102_USB_DEVICE(0x0471, 0x0328, BRIDGE_SN9C105), },
-#endif
 	{ SN9C102_USB_DEVICE(0x0c45, 0x60c0, BRIDGE_SN9C105), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60c2, BRIDGE_SN9C105), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60c8, BRIDGE_SN9C105), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60cc, BRIDGE_SN9C105), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60ea, BRIDGE_SN9C105), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60ec, BRIDGE_SN9C105), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60ef, BRIDGE_SN9C105), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x60fa, BRIDGE_SN9C105), },
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60c2, BRIDGE_SN9C105), }, PO1030 */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60c8, BRIDGE_SN9C105), }, OM6801 */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60cc, BRIDGE_SN9C105), }, HV7131GP */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60ea, BRIDGE_SN9C105), }, non existent ? */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60ec, BRIDGE_SN9C105), }, MO4000 */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60ef, BRIDGE_SN9C105), }, ICM105C */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x60fa, BRIDGE_SN9C105), }, OV7648 */
 	{ SN9C102_USB_DEVICE(0x0c45, 0x60fb, BRIDGE_SN9C105), },
 	{ SN9C102_USB_DEVICE(0x0c45, 0x60fc, BRIDGE_SN9C105), },
 	{ SN9C102_USB_DEVICE(0x0c45, 0x60fe, BRIDGE_SN9C105), },
 	/* SN9C120 */
 	{ SN9C102_USB_DEVICE(0x0458, 0x7025, BRIDGE_SN9C120), },
-#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
-	{ SN9C102_USB_DEVICE(0x0c45, 0x6102, BRIDGE_SN9C120), },
-#endif
-	{ SN9C102_USB_DEVICE(0x0c45, 0x6108, BRIDGE_SN9C120), },
-	{ SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), },
-#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x6102, BRIDGE_SN9C120), }, po2030 */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x6108, BRIDGE_SN9C120), }, om6801 */
+/*	{ SN9C102_USB_DEVICE(0x0c45, 0x610f, BRIDGE_SN9C120), }, S5K53BEB */
 	{ SN9C102_USB_DEVICE(0x0c45, 0x6130, BRIDGE_SN9C120), },
-#endif
 /*	{ SN9C102_USB_DEVICE(0x0c45, 0x6138, BRIDGE_SN9C120), }, MO8000 */
-#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
 	{ SN9C102_USB_DEVICE(0x0c45, 0x613a, BRIDGE_SN9C120), },
-#endif
 	{ SN9C102_USB_DEVICE(0x0c45, 0x613b, BRIDGE_SN9C120), },
-#if !defined CONFIG_USB_GSPCA_SONIXJ && !defined CONFIG_USB_GSPCA_SONIXJ_MODULE
 	{ SN9C102_USB_DEVICE(0x0c45, 0x613c, BRIDGE_SN9C120), },
 	{ SN9C102_USB_DEVICE(0x0c45, 0x613e, BRIDGE_SN9C120), },
 #endif
diff --git a/drivers/media/video/sr030pc30.c b/drivers/media/video/sr030pc30.c
index 864696b..c901721 100644
--- a/drivers/media/video/sr030pc30.c
+++ b/drivers/media/video/sr030pc30.c
@@ -714,15 +714,6 @@
 	return ret;
 }
 
-static int sr030pc30_s_config(struct v4l2_subdev *sd,
-			      int irq, void *platform_data)
-{
-	struct sr030pc30_info *info = to_sr030pc30(sd);
-
-	info->pdata = platform_data;
-	return 0;
-}
-
 static int sr030pc30_s_stream(struct v4l2_subdev *sd, int enable)
 {
 	return 0;
@@ -763,7 +754,6 @@
 }
 
 static const struct v4l2_subdev_core_ops sr030pc30_core_ops = {
-	.s_config	= sr030pc30_s_config,
 	.s_power	= sr030pc30_s_power,
 	.queryctrl	= sr030pc30_queryctrl,
 	.s_ctrl		= sr030pc30_s_ctrl,
diff --git a/drivers/media/video/tda9875.c b/drivers/media/video/tda9875.c
deleted file mode 100644
index 35b6ff5..0000000
--- a/drivers/media/video/tda9875.c
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * For the TDA9875 chip
- * (The TDA9875 is used on the Diamond DTV2000 french version
- * Other cards probably use these chips as well.)
- * This driver will not complain if used with any
- * other i2c device with the same address.
- *
- * Copyright (c) 2000 Guillaume Delvit based on Gerd Knorr source and
- * Eric Sandeen
- * Copyright (c) 2006 Mauro Carvalho Chehab <mchehab@infradead.org>
- * This code is placed under the terms of the GNU General Public License
- * Based on tda9855.c by Steve VanDeBogart (vandebo@uclink.berkeley.edu)
- * Which was based on tda8425.c by Greg Alexander (c) 1998
- *
- * OPTIONS:
- * debug   - set to 1 if you'd like to see debug messages
- *
- *  Revision: 0.1 - original version
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/videodev2.h>
-#include <media/v4l2-device.h>
-#include <media/i2c-addr.h>
-
-static int debug; /* insmod parameter */
-module_param(debug, int, S_IRUGO | S_IWUSR);
-MODULE_LICENSE("GPL");
-
-
-/* This is a superset of the TDA9875 */
-struct tda9875 {
-	struct v4l2_subdev sd;
-	int rvol, lvol;
-	int bass, treble;
-};
-
-static inline struct tda9875 *to_state(struct v4l2_subdev *sd)
-{
-	return container_of(sd, struct tda9875, sd);
-}
-
-#define dprintk  if (debug) printk
-
-/* The TDA9875 is made by Philips Semiconductor
- * http://www.semiconductors.philips.com
- * TDA9875: I2C-bus controlled DSP audio processor, FM demodulator
- *
- */
-
-		/* subaddresses for TDA9875 */
-#define TDA9875_MUT         0x12  /*General mute  (value --> 0b11001100*/
-#define TDA9875_CFG         0x01  /* Config register (value --> 0b00000000 */
-#define TDA9875_DACOS       0x13  /*DAC i/o select (ADC) 0b0000100*/
-#define TDA9875_LOSR        0x16  /*Line output select regirter 0b0100 0001*/
-
-#define TDA9875_CH1V        0x0c  /*Channel 1 volume (mute)*/
-#define TDA9875_CH2V        0x0d  /*Channel 2 volume (mute)*/
-#define TDA9875_SC1         0x14  /*SCART 1 in (mono)*/
-#define TDA9875_SC2         0x15  /*SCART 2 in (mono)*/
-
-#define TDA9875_ADCIS       0x17  /*ADC input select (mono) 0b0110 000*/
-#define TDA9875_AER         0x19  /*Audio effect (AVL+Pseudo) 0b0000 0110*/
-#define TDA9875_MCS         0x18  /*Main channel select (DAC) 0b0000100*/
-#define TDA9875_MVL         0x1a  /* Main volume gauche */
-#define TDA9875_MVR         0x1b  /* Main volume droite */
-#define TDA9875_MBA         0x1d  /* Main Basse */
-#define TDA9875_MTR         0x1e  /* Main treble */
-#define TDA9875_ACS         0x1f  /* Auxilary channel select (FM) 0b0000000*/
-#define TDA9875_AVL         0x20  /* Auxilary volume gauche */
-#define TDA9875_AVR         0x21  /* Auxilary volume droite */
-#define TDA9875_ABA         0x22  /* Auxilary Basse */
-#define TDA9875_ATR         0x23  /* Auxilary treble */
-
-#define TDA9875_MSR         0x02  /* Monitor select register */
-#define TDA9875_C1MSB       0x03  /* Carrier 1 (FM) frequency register MSB */
-#define TDA9875_C1MIB       0x04  /* Carrier 1 (FM) frequency register (16-8]b */
-#define TDA9875_C1LSB       0x05  /* Carrier 1 (FM) frequency register LSB */
-#define TDA9875_C2MSB       0x06  /* Carrier 2 (nicam) frequency register MSB */
-#define TDA9875_C2MIB       0x07  /* Carrier 2 (nicam) frequency register (16-8]b */
-#define TDA9875_C2LSB       0x08  /* Carrier 2 (nicam) frequency register LSB */
-#define TDA9875_DCR         0x09  /* Demodulateur configuration regirter*/
-#define TDA9875_DEEM        0x0a  /* FM de-emphasis regirter*/
-#define TDA9875_FMAT        0x0b  /* FM Matrix regirter*/
-
-/* values */
-#define TDA9875_MUTE_ON	    0xff /* general mute */
-#define TDA9875_MUTE_OFF    0xcc /* general no mute */
-
-
-
-/* Begin code */
-
-static int tda9875_write(struct v4l2_subdev *sd, int subaddr, unsigned char val)
-{
-	struct i2c_client *client = v4l2_get_subdevdata(sd);
-	unsigned char buffer[2];
-
-	v4l2_dbg(1, debug, sd, "Writing %d 0x%x\n", subaddr, val);
-	buffer[0] = subaddr;
-	buffer[1] = val;
-	if (2 != i2c_master_send(client, buffer, 2)) {
-		v4l2_warn(sd, "I/O error, trying (write %d 0x%x)\n",
-		       subaddr, val);
-		return -1;
-	}
-	return 0;
-}
-
-
-static int i2c_read_register(struct i2c_client *client, int addr, int reg)
-{
-	unsigned char write[1];
-	unsigned char read[1];
-	struct i2c_msg msgs[2] = {
-		{ addr, 0,        1, write },
-		{ addr, I2C_M_RD, 1, read  }
-	};
-
-	write[0] = reg;
-
-	if (2 != i2c_transfer(client->adapter, msgs, 2)) {
-		v4l_warn(client, "I/O error (read2)\n");
-		return -1;
-	}
-	v4l_dbg(1, debug, client, "chip_read2: reg%d=0x%x\n", reg, read[0]);
-	return read[0];
-}
-
-static void tda9875_set(struct v4l2_subdev *sd)
-{
-	struct tda9875 *tda = to_state(sd);
-	unsigned char a;
-
-	v4l2_dbg(1, debug, sd, "tda9875_set(%04x,%04x,%04x,%04x)\n",
-		tda->lvol, tda->rvol, tda->bass, tda->treble);
-
-	a = tda->lvol & 0xff;
-	tda9875_write(sd, TDA9875_MVL, a);
-	a =tda->rvol & 0xff;
-	tda9875_write(sd, TDA9875_MVR, a);
-	a =tda->bass & 0xff;
-	tda9875_write(sd, TDA9875_MBA, a);
-	a =tda->treble  & 0xff;
-	tda9875_write(sd, TDA9875_MTR, a);
-}
-
-static void do_tda9875_init(struct v4l2_subdev *sd)
-{
-	struct tda9875 *t = to_state(sd);
-
-	v4l2_dbg(1, debug, sd, "In tda9875_init\n");
-	tda9875_write(sd, TDA9875_CFG, 0xd0); /*reg de config 0 (reset)*/
-	tda9875_write(sd, TDA9875_MSR, 0x03);    /* Monitor 0b00000XXX*/
-	tda9875_write(sd, TDA9875_C1MSB, 0x00);  /*Car1(FM) MSB XMHz*/
-	tda9875_write(sd, TDA9875_C1MIB, 0x00);  /*Car1(FM) MIB XMHz*/
-	tda9875_write(sd, TDA9875_C1LSB, 0x00);  /*Car1(FM) LSB XMHz*/
-	tda9875_write(sd, TDA9875_C2MSB, 0x00);  /*Car2(NICAM) MSB XMHz*/
-	tda9875_write(sd, TDA9875_C2MIB, 0x00);  /*Car2(NICAM) MIB XMHz*/
-	tda9875_write(sd, TDA9875_C2LSB, 0x00);  /*Car2(NICAM) LSB XMHz*/
-	tda9875_write(sd, TDA9875_DCR, 0x00);    /*Demod config 0x00*/
-	tda9875_write(sd, TDA9875_DEEM, 0x44);   /*DE-Emph 0b0100 0100*/
-	tda9875_write(sd, TDA9875_FMAT, 0x00);   /*FM Matrix reg 0x00*/
-	tda9875_write(sd, TDA9875_SC1, 0x00);    /* SCART 1 (SC1)*/
-	tda9875_write(sd, TDA9875_SC2, 0x01);    /* SCART 2 (sc2)*/
-
-	tda9875_write(sd, TDA9875_CH1V, 0x10);  /* Channel volume 1 mute*/
-	tda9875_write(sd, TDA9875_CH2V, 0x10);  /* Channel volume 2 mute */
-	tda9875_write(sd, TDA9875_DACOS, 0x02); /* sig DAC i/o(in:nicam)*/
-	tda9875_write(sd, TDA9875_ADCIS, 0x6f); /* sig ADC input(in:mono)*/
-	tda9875_write(sd, TDA9875_LOSR, 0x00);  /* line out (in:mono)*/
-	tda9875_write(sd, TDA9875_AER, 0x00);   /*06 Effect (AVL+PSEUDO) */
-	tda9875_write(sd, TDA9875_MCS, 0x44);   /* Main ch select (DAC) */
-	tda9875_write(sd, TDA9875_MVL, 0x03);   /* Vol Main left 10dB */
-	tda9875_write(sd, TDA9875_MVR, 0x03);   /* Vol Main right 10dB*/
-	tda9875_write(sd, TDA9875_MBA, 0x00);   /* Main Bass Main 0dB*/
-	tda9875_write(sd, TDA9875_MTR, 0x00);   /* Main Treble Main 0dB*/
-	tda9875_write(sd, TDA9875_ACS, 0x44);   /* Aux chan select (dac)*/
-	tda9875_write(sd, TDA9875_AVL, 0x00);   /* Vol Aux left 0dB*/
-	tda9875_write(sd, TDA9875_AVR, 0x00);   /* Vol Aux right 0dB*/
-	tda9875_write(sd, TDA9875_ABA, 0x00);   /* Aux Bass Main 0dB*/
-	tda9875_write(sd, TDA9875_ATR, 0x00);   /* Aux Aigus Main 0dB*/
-
-	tda9875_write(sd, TDA9875_MUT, 0xcc);   /* General mute  */
-
-	t->lvol = t->rvol = 0;  	/* 0dB */
-	t->bass = 0; 			/* 0dB */
-	t->treble = 0;  		/* 0dB */
-	tda9875_set(sd);
-}
-
-
-static int tda9875_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
-	struct tda9875 *t = to_state(sd);
-
-	switch (ctrl->id) {
-	case V4L2_CID_AUDIO_VOLUME:
-	{
-		int left = (t->lvol+84)*606;
-		int right = (t->rvol+84)*606;
-
-		ctrl->value=max(left,right);
-		return 0;
-	}
-	case V4L2_CID_AUDIO_BALANCE:
-	{
-		int left = (t->lvol+84)*606;
-		int right = (t->rvol+84)*606;
-		int volume = max(left,right);
-		int balance = (32768*min(left,right))/
-			      (volume ? volume : 1);
-		ctrl->value=(left<right)?
-			(65535-balance) : balance;
-		return 0;
-	}
-	case V4L2_CID_AUDIO_BASS:
-		ctrl->value = (t->bass+12)*2427;    /* min -12 max +15 */
-		return 0;
-	case V4L2_CID_AUDIO_TREBLE:
-		ctrl->value = (t->treble+12)*2730;/* min -12 max +12 */
-		return 0;
-	}
-	return -EINVAL;
-}
-
-static int tda9875_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
-{
-	struct tda9875 *t = to_state(sd);
-	int chvol = 0, volume = 0, balance = 0, left, right;
-
-	switch (ctrl->id) {
-	case V4L2_CID_AUDIO_VOLUME:
-		left = (t->lvol+84)*606;
-		right = (t->rvol+84)*606;
-
-		volume = max(left,right);
-		balance = (32768*min(left,right))/
-			      (volume ? volume : 1);
-		balance =(left<right)?
-			(65535-balance) : balance;
-
-		volume = ctrl->value;
-
-		chvol=1;
-		break;
-	case V4L2_CID_AUDIO_BALANCE:
-		left = (t->lvol+84)*606;
-		right = (t->rvol+84)*606;
-
-		volume=max(left,right);
-
-		balance = ctrl->value;
-
-		chvol=1;
-		break;
-	case V4L2_CID_AUDIO_BASS:
-		t->bass = ((ctrl->value/2400)-12) & 0xff;
-		if (t->bass > 15)
-			t->bass = 15;
-		if (t->bass < -12)
-			t->bass = -12 & 0xff;
-		break;
-	case V4L2_CID_AUDIO_TREBLE:
-		t->treble = ((ctrl->value/2700)-12) & 0xff;
-		if (t->treble > 12)
-			t->treble = 12;
-		if (t->treble < -12)
-			t->treble = -12 & 0xff;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if (chvol) {
-		left = (min(65536 - balance,32768) *
-			volume) / 32768;
-		right = (min(balance,32768) *
-				volume) / 32768;
-		t->lvol = ((left/606)-84) & 0xff;
-		if (t->lvol > 24)
-			t->lvol = 24;
-		if (t->lvol < -84)
-			t->lvol = -84 & 0xff;
-
-		t->rvol = ((right/606)-84) & 0xff;
-		if (t->rvol > 24)
-			t->rvol = 24;
-		if (t->rvol < -84)
-			t->rvol = -84 & 0xff;
-	}
-
-	tda9875_set(sd);
-	return 0;
-}
-
-static int tda9875_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
-{
-	switch (qc->id) {
-	case V4L2_CID_AUDIO_VOLUME:
-		return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 58880);
-	case V4L2_CID_AUDIO_BASS:
-	case V4L2_CID_AUDIO_TREBLE:
-		return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768);
-	}
-	return -EINVAL;
-}
-
-/* ----------------------------------------------------------------------- */
-
-static const struct v4l2_subdev_core_ops tda9875_core_ops = {
-	.queryctrl = tda9875_queryctrl,
-	.g_ctrl = tda9875_g_ctrl,
-	.s_ctrl = tda9875_s_ctrl,
-};
-
-static const struct v4l2_subdev_ops tda9875_ops = {
-	.core = &tda9875_core_ops,
-};
-
-/* ----------------------------------------------------------------------- */
-
-
-/* *********************** *
- * i2c interface functions *
- * *********************** */
-
-static int tda9875_checkit(struct i2c_client *client, int addr)
-{
-	int dic, rev;
-
-	dic = i2c_read_register(client, addr, 254);
-	rev = i2c_read_register(client, addr, 255);
-
-	if (dic == 0 || dic == 2) { /* tda9875 and tda9875A */
-		v4l_info(client, "tda9875%s rev. %d detected at 0x%02x\n",
-			dic == 0 ? "" : "A", rev, addr << 1);
-		return 1;
-	}
-	v4l_info(client, "no such chip at 0x%02x (dic=0x%x rev=0x%x)\n",
-			addr << 1, dic, rev);
-	return 0;
-}
-
-static int tda9875_probe(struct i2c_client *client,
-			const struct i2c_device_id *id)
-{
-	struct tda9875 *t;
-	struct v4l2_subdev *sd;
-
-	v4l_info(client, "chip found @ 0x%02x (%s)\n",
-			client->addr << 1, client->adapter->name);
-
-	if (!tda9875_checkit(client, client->addr))
-		return -ENODEV;
-
-	t = kzalloc(sizeof(*t), GFP_KERNEL);
-	if (!t)
-		return -ENOMEM;
-	sd = &t->sd;
-	v4l2_i2c_subdev_init(sd, client, &tda9875_ops);
-
-	do_tda9875_init(sd);
-	return 0;
-}
-
-static int tda9875_remove(struct i2c_client *client)
-{
-	struct v4l2_subdev *sd = i2c_get_clientdata(client);
-
-	do_tda9875_init(sd);
-	v4l2_device_unregister_subdev(sd);
-	kfree(to_state(sd));
-	return 0;
-}
-
-static const struct i2c_device_id tda9875_id[] = {
-	{ "tda9875", 0 },
-	{ }
-};
-MODULE_DEVICE_TABLE(i2c, tda9875_id);
-
-static struct i2c_driver tda9875_driver = {
-	.driver = {
-		.owner	= THIS_MODULE,
-		.name	= "tda9875",
-	},
-	.probe		= tda9875_probe,
-	.remove		= tda9875_remove,
-	.id_table	= tda9875_id,
-};
-
-static __init int init_tda9875(void)
-{
-	return i2c_add_driver(&tda9875_driver);
-}
-
-static __exit void exit_tda9875(void)
-{
-	i2c_del_driver(&tda9875_driver);
-}
-
-module_init(init_tda9875);
-module_exit(exit_tda9875);
diff --git a/drivers/media/video/tlg2300/pd-video.c b/drivers/media/video/tlg2300/pd-video.c
index a1ffe18..df33a1d 100644
--- a/drivers/media/video/tlg2300/pd-video.c
+++ b/drivers/media/video/tlg2300/pd-video.c
@@ -512,19 +512,20 @@
 			int buf_size, gfp_t gfp_flags,
 			usb_complete_t complete_fn, void *context)
 {
-	struct urb *urb;
-	void *mem;
-	int i;
+	int i = 0;
 
-	for (i = 0; i < num; i++) {
-		urb = usb_alloc_urb(0, gfp_flags);
+	for (; i < num; i++) {
+		void *mem;
+		struct urb *urb = usb_alloc_urb(0, gfp_flags);
 		if (urb == NULL)
 			return i;
 
 		mem = usb_alloc_coherent(udev, buf_size, gfp_flags,
 					 &urb->transfer_dma);
-		if (mem == NULL)
+		if (mem == NULL) {
+			usb_free_urb(urb);
 			return i;
+		}
 
 		usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep_addr),
 				mem, buf_size, complete_fn, context);
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 3f0871b..810eef4 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -407,18 +407,6 @@
 	/* Decrease the module use count to match the first try_module_get. */
 	module_put(client->driver->driver.owner);
 
-	if (sd) {
-		/* We return errors from v4l2_subdev_call only if we have the
-		   callback as the .s_config is not mandatory */
-		int err = v4l2_subdev_call(sd, core, s_config,
-				info->irq, info->platform_data);
-
-		if (err && err != -ENOIOCTLCMD) {
-			v4l2_device_unregister_subdev(sd);
-			sd = NULL;
-		}
-	}
-
 error:
 	/* If we have a client but no subdev, then something went wrong and
 	   we must unregister the client. */
@@ -428,9 +416,8 @@
 }
 EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
 
-struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
+struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
 		struct i2c_adapter *adapter, const char *client_type,
-		int irq, void *platform_data,
 		u8 addr, const unsigned short *probe_addrs)
 {
 	struct i2c_board_info info;
@@ -440,12 +427,10 @@
 	memset(&info, 0, sizeof(info));
 	strlcpy(info.type, client_type, sizeof(info.type));
 	info.addr = addr;
-	info.irq = irq;
-	info.platform_data = platform_data;
 
 	return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, probe_addrs);
 }
-EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_cfg);
+EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev);
 
 /* Return i2c client address of v4l2_subdev. */
 unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd)
diff --git a/drivers/media/video/v4l2-ctrls.c b/drivers/media/video/v4l2-ctrls.c
index 8f81efc..ef66d2a 100644
--- a/drivers/media/video/v4l2-ctrls.c
+++ b/drivers/media/video/v4l2-ctrls.c
@@ -569,7 +569,7 @@
 	int ret;
 	u32 size;
 
-	ctrl->has_new = 1;
+	ctrl->is_new = 1;
 	switch (ctrl->type) {
 	case V4L2_CTRL_TYPE_INTEGER64:
 		ctrl->val64 = c->value64;
@@ -1280,8 +1280,12 @@
 		if (ctrl->done)
 			continue;
 
-		for (i = 0; i < master->ncontrols; i++)
-			cur_to_new(master->cluster[i]);
+		for (i = 0; i < master->ncontrols; i++) {
+			if (master->cluster[i]) {
+				cur_to_new(master->cluster[i]);
+				master->cluster[i]->is_new = 1;
+			}
+		}
 
 		/* Skip button controls and read-only controls. */
 		if (ctrl->type == V4L2_CTRL_TYPE_BUTTON ||
@@ -1340,12 +1344,15 @@
 
 	ctrl = ref->ctrl;
 	memset(qc, 0, sizeof(*qc));
-	qc->id = ctrl->id;
+	if (id >= V4L2_CID_PRIVATE_BASE)
+		qc->id = id;
+	else
+		qc->id = ctrl->id;
 	strlcpy(qc->name, ctrl->name, sizeof(qc->name));
 	qc->minimum = ctrl->minimum;
 	qc->maximum = ctrl->maximum;
 	qc->default_value = ctrl->default_value;
-	if (qc->type == V4L2_CTRL_TYPE_MENU)
+	if (ctrl->type == V4L2_CTRL_TYPE_MENU)
 		qc->step = 1;
 	else
 		qc->step = ctrl->step;
@@ -1645,7 +1652,7 @@
 		if (ctrl == NULL)
 			continue;
 
-		if (ctrl->has_new) {
+		if (ctrl->is_new) {
 			/* Double check this: it may have changed since the
 			   last check in try_or_set_ext_ctrls(). */
 			if (set && (ctrl->flags & V4L2_CTRL_FLAG_GRABBED))
@@ -1719,13 +1726,13 @@
 
 		v4l2_ctrl_lock(ctrl);
 
-		/* Reset the 'has_new' flags of the cluster */
+		/* Reset the 'is_new' flags of the cluster */
 		for (j = 0; j < master->ncontrols; j++)
 			if (master->cluster[j])
-				master->cluster[j]->has_new = 0;
+				master->cluster[j]->is_new = 0;
 
 		/* Copy the new caller-supplied control values.
-		   user_to_new() sets 'has_new' to 1. */
+		   user_to_new() sets 'is_new' to 1. */
 		ret = cluster_walk(i, cs, helpers, user_to_new);
 
 		if (!ret)
@@ -1820,15 +1827,18 @@
 	int ret;
 	int i;
 
+	if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
+		return -EACCES;
+
 	v4l2_ctrl_lock(ctrl);
 
-	/* Reset the 'has_new' flags of the cluster */
+	/* Reset the 'is_new' flags of the cluster */
 	for (i = 0; i < master->ncontrols; i++)
 		if (master->cluster[i])
-			master->cluster[i]->has_new = 0;
+			master->cluster[i]->is_new = 0;
 
 	ctrl->val = *val;
-	ctrl->has_new = 1;
+	ctrl->is_new = 1;
 	ret = try_or_set_control_cluster(master, false);
 	if (!ret)
 		ret = try_or_set_control_cluster(master, true);
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 359e232..341764a 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -419,6 +419,10 @@
  *	The registration code assigns minor numbers and device node numbers
  *	based on the requested type and registers the new device node with
  *	the kernel.
+ *
+ *	This function assumes that struct video_device was zeroed when it
+ *	was allocated and does not contain any stale data.
+ *
  *	An error is returned if no free minor or device node number could be
  *	found, or if the registration of the device node failed.
  *
@@ -440,7 +444,6 @@
 	int minor_offset = 0;
 	int minor_cnt = VIDEO_NUM_DEVICES;
 	const char *name_base;
-	void *priv = vdev->dev.p;
 
 	/* A minor value of -1 marks this video device as never
 	   having been registered */
@@ -559,10 +562,6 @@
 	}
 
 	/* Part 4: register the device with sysfs */
-	memset(&vdev->dev, 0, sizeof(vdev->dev));
-	/* The memset above cleared the device's device_private, so
-	   put back the copy we made earlier. */
-	vdev->dev.p = priv;
 	vdev->dev.class = &video_class;
 	vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
 	if (vdev->parent)
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
index 7fe6f92..ce64fe1 100644
--- a/drivers/media/video/v4l2-device.c
+++ b/drivers/media/video/v4l2-device.c
@@ -100,6 +100,7 @@
 			   is a platform bus, then it is never deleted. */
 			if (client)
 				i2c_unregister_device(client);
+			continue;
 		}
 #endif
 #if defined(CONFIG_SPI)
@@ -108,6 +109,7 @@
 
 			if (spi)
 				spi_unregister_device(spi);
+			continue;
 		}
 #endif
 	}
@@ -126,11 +128,19 @@
 	WARN_ON(sd->v4l2_dev != NULL);
 	if (!try_module_get(sd->owner))
 		return -ENODEV;
+	sd->v4l2_dev = v4l2_dev;
+	if (sd->internal_ops && sd->internal_ops->registered) {
+		err = sd->internal_ops->registered(sd);
+		if (err)
+			return err;
+	}
 	/* This just returns 0 if either of the two args is NULL */
 	err = v4l2_ctrl_add_handler(v4l2_dev->ctrl_handler, sd->ctrl_handler);
-	if (err)
+	if (err) {
+		if (sd->internal_ops && sd->internal_ops->unregistered)
+			sd->internal_ops->unregistered(sd);
 		return err;
-	sd->v4l2_dev = v4l2_dev;
+	}
 	spin_lock(&v4l2_dev->lock);
 	list_add_tail(&sd->list, &v4l2_dev->subdevs);
 	spin_unlock(&v4l2_dev->lock);
@@ -146,6 +156,8 @@
 	spin_lock(&sd->v4l2_dev->lock);
 	list_del(&sd->list);
 	spin_unlock(&sd->v4l2_dev->lock);
+	if (sd->internal_ops && sd->internal_ops->unregistered)
+		sd->internal_ops->unregistered(sd);
 	sd->v4l2_dev = NULL;
 	module_put(sd->owner);
 }
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 7e47f15..f51327e 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -1659,20 +1659,24 @@
 	{
 		struct v4l2_dbg_register *p = arg;
 
-		if (!capable(CAP_SYS_ADMIN))
-			ret = -EPERM;
-		else if (ops->vidioc_g_register)
-			ret = ops->vidioc_g_register(file, fh, p);
+		if (ops->vidioc_g_register) {
+			if (!capable(CAP_SYS_ADMIN))
+				ret = -EPERM;
+			else
+				ret = ops->vidioc_g_register(file, fh, p);
+		}
 		break;
 	}
 	case VIDIOC_DBG_S_REGISTER:
 	{
 		struct v4l2_dbg_register *p = arg;
 
-		if (!capable(CAP_SYS_ADMIN))
-			ret = -EPERM;
-		else if (ops->vidioc_s_register)
-			ret = ops->vidioc_s_register(file, fh, p);
+		if (ops->vidioc_s_register) {
+			if (!capable(CAP_SYS_ADMIN))
+				ret = -EPERM;
+			else
+				ret = ops->vidioc_s_register(file, fh, p);
+		}
 		break;
 	}
 #endif
diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c
index 019ee20..fa35639 100644
--- a/drivers/media/video/w9966.c
+++ b/drivers/media/video/w9966.c
@@ -937,6 +937,7 @@
 		parport_unregister_device(cam->pdev);
 		w9966_set_state(cam, W9966_STATE_PDEV, 0);
 	}
+	memset(cam, 0, sizeof(*cam));
 }
 
 
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c
index 9cdc3bb..9f2bac5 100644
--- a/drivers/media/video/zoran/zoran_card.c
+++ b/drivers/media/video/zoran/zoran_card.c
@@ -1041,7 +1041,7 @@
 	/* allocate memory *before* doing anything to the hardware
 	 * in case allocation fails */
 	zr->stat_com = kzalloc(BUZ_NUM_STAT_COM * 4, GFP_KERNEL);
-	zr->video_dev = kmalloc(sizeof(struct video_device), GFP_KERNEL);
+	zr->video_dev = video_device_alloc();
 	if (!zr->stat_com || !zr->video_dev) {
 		dprintk(1,
 			KERN_ERR
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index e9a3eab..8c1d85e 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -621,7 +621,7 @@
 {
 	int rc;
 
-	workqueue = create_freezeable_workqueue("kmemstick");
+	workqueue = create_freezable_workqueue("kmemstick");
 	if (!workqueue)
 		return -ENOMEM;
 
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index f71f229..1735c84 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
 #define COPYRIGHT	"Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON	"3.04.17"
-#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.17"
+#define MPT_LINUX_VERSION_COMMON	"3.04.18"
+#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.18"
 #define WHAT_MAGIC_STRING		"@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index a3856ed..e8deb8e 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -597,6 +597,13 @@
 }
 
 static int
+mptctl_release(struct inode *inode, struct file *filep)
+{
+	fasync_helper(-1, filep, 0, &async_queue);
+	return 0;
+}
+
+static int
 mptctl_fasync(int fd, struct file *filep, int mode)
 {
 	MPT_ADAPTER	*ioc;
@@ -2815,6 +2822,7 @@
 	.llseek =	no_llseek,
 	.fasync = 	mptctl_fasync,
 	.unlocked_ioctl = mptctl_ioctl,
+	.release =	mptctl_release,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = compat_mpctl_ioctl,
 #endif
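
The mptctl hunk above adds a release handler so the fasync registration is dropped on the final close. A generic sketch of that fasync/release pairing, with hypothetical "foo" names and a module-global queue mirroring mptctl's async_queue:

#include <linux/fs.h>
#include <linux/module.h>

static struct fasync_struct *foo_async_queue;

static int foo_fasync(int fd, struct file *filp, int mode)
{
	return fasync_helper(fd, filp, mode, &foo_async_queue);
}

static int foo_release(struct inode *inode, struct file *filp)
{
	/* drop this file from the async notification list on close,
	 * as the new mptctl_release() does */
	fasync_helper(-1, filp, 0, &foo_async_queue);
	return 0;
}

static const struct file_operations foo_fops = {
	.owner	 = THIS_MODULE,
	.fasync	 = foo_fasync,
	.release = foo_release,
};
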
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 59b8f53..0d9b82a 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1873,8 +1873,9 @@
 	}
 
  out:
-	printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
-	    ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
+	printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n",
+	    ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
+	    SCpnt, SCpnt->serial_number);
 
 	return retval;
 }
@@ -1911,7 +1912,7 @@
 
 	vdevice = SCpnt->device->hostdata;
 	if (!vdevice || !vdevice->vtarget) {
-		retval = SUCCESS;
+		retval = 0;
 		goto out;
 	}
 
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 5f6852d..44d4475 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -329,7 +329,7 @@
 {
 	int rc;
 
-	workqueue = create_freezeable_workqueue("tifm");
+	workqueue = create_freezable_workqueue("tifm");
 	if (!workqueue)
 		return -ENOMEM;
 
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 4d2ea8e..6df5a55 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -785,7 +785,7 @@
 	if (x86_hyper != &x86_hyper_vmware)
 		return -ENODEV;
 
-	vmballoon_wq = create_freezeable_workqueue("vmmemctl");
+	vmballoon_wq = create_freezable_workqueue("vmmemctl");
 	if (!vmballoon_wq) {
 		pr_err("failed to create workqueue\n");
 		return -ENOMEM;
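
Several drivers in this merge make the same create_freezeable_workqueue() -> create_freezable_workqueue() spelling change. A minimal sketch of the call as defined in <linux/workqueue.h> after the rename; the "foo" names are invented:

#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *foo_wq;

static void foo_work_fn(struct work_struct *work)
{
	/* work item body */
}
static DECLARE_WORK(foo_work, foo_work_fn);

static int __init foo_init(void)
{
	/* a freezable workqueue is frozen across suspend, so queued work
	 * cannot touch the hardware mid-transition */
	foo_wq = create_freezable_workqueue("foo");
	if (!foo_wq)
		return -ENOMEM;
	queue_work(foo_wq, &foo_work);
	return 0;
}
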
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index bac7d62..0371bf5 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -462,7 +462,7 @@
 		goto out;
 	}
 
-	mmc = mmc_alloc_host(sizeof(*mmc), &pdev->dev);
+	mmc = mmc_alloc_host(sizeof(struct sdh_host), &pdev->dev);
 	if (!mmc) {
 		ret = -ENOMEM;
 		goto out;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index b3a0ab0..74218ad 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -14,6 +14,7 @@
  */
 
 #include <linux/mmc/host.h>
+#include <linux/err.h>
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
@@ -827,8 +828,8 @@
 	}
 
 	host->clk = clk_get(&pdev->dev, "mmc");
-	if (!host->clk) {
-		ret = -ENOENT;
+	if (IS_ERR(host->clk)) {
+		ret = PTR_ERR(host->clk);
 		dev_err(&pdev->dev, "Failed to get mmc clock\n");
 		goto err_free_host;
 	}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 5630228..2d6de3e 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -14,6 +14,7 @@
 #include <linux/ioport.h>
 #include <linux/device.h>
 #include <linux/interrupt.h>
+#include <linux/kernel.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
@@ -46,10 +47,6 @@
  *	      is asserted (likewise for RX)
  * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
  *		  is asserted (likewise for RX)
- * @broken_blockend: the MCI_DATABLOCKEND is broken on the hardware
- *		and will not work at all.
- * @broken_blockend_dma: the MCI_DATABLOCKEND is broken on the hardware when
- *		using DMA.
  * @sdio: variant supports SDIO
  * @st_clkdiv: true if using a ST-specific clock divider algorithm
  */
@@ -59,8 +56,6 @@
 	unsigned int		datalength_bits;
 	unsigned int		fifosize;
 	unsigned int		fifohalfsize;
-	bool			broken_blockend;
-	bool			broken_blockend_dma;
 	bool			sdio;
 	bool			st_clkdiv;
 };
@@ -76,7 +71,6 @@
 	.fifohalfsize		= 8 * 4,
 	.clkreg_enable		= 1 << 13, /* HWFCEN */
 	.datalength_bits	= 16,
-	.broken_blockend_dma	= true,
 	.sdio			= true,
 };
 
@@ -86,7 +80,6 @@
 	.clkreg			= MCI_CLK_ENABLE,
 	.clkreg_enable		= 1 << 14, /* HWFCEN */
 	.datalength_bits	= 24,
-	.broken_blockend	= true,
 	.sdio			= true,
 	.st_clkdiv		= true,
 };
@@ -210,8 +203,6 @@
 	host->data = data;
 	host->size = data->blksz * data->blocks;
 	host->data_xfered = 0;
-	host->blockend = false;
-	host->dataend = false;
 
 	mmci_init_sg(host, data);
 
@@ -288,21 +279,26 @@
 mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 	      unsigned int status)
 {
-	struct variant_data *variant = host->variant;
-
 	/* First check for errors */
 	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
-		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
-		if (status & MCI_DATACRCFAIL)
-			data->error = -EILSEQ;
-		else if (status & MCI_DATATIMEOUT)
-			data->error = -ETIMEDOUT;
-		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN))
-			data->error = -EIO;
+		u32 remain, success;
 
-		/* Force-complete the transaction */
-		host->blockend = true;
-		host->dataend = true;
+		/* Calculate how far we are into the transfer */
+		remain = readl(host->base + MMCIDATACNT);
+		success = data->blksz * data->blocks - remain;
+
+		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status);
+		if (status & MCI_DATACRCFAIL) {
+			/* Last block was not successful */
+			host->data_xfered = round_down(success - 1, data->blksz);
+			data->error = -EILSEQ;
+		} else if (status & MCI_DATATIMEOUT) {
+			host->data_xfered = round_down(success, data->blksz);
+			data->error = -ETIMEDOUT;
+		} else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+			host->data_xfered = round_down(success, data->blksz);
+			data->error = -EIO;
+		}
 
 		/*
 		 * We hit an error condition.  Ensure that any data
@@ -321,61 +317,14 @@
 		}
 	}
 
-	/*
-	 * On ARM variants in PIO mode, MCI_DATABLOCKEND
-	 * is always sent first, and we increase the
-	 * transfered number of bytes for that IRQ. Then
-	 * MCI_DATAEND follows and we conclude the transaction.
-	 *
-	 * On the Ux500 single-IRQ variant MCI_DATABLOCKEND
-	 * doesn't seem to immediately clear from the status,
-	 * so we can't use it keep count when only one irq is
-	 * used because the irq will hit for other reasons, and
-	 * then the flag is still up. So we use the MCI_DATAEND
-	 * IRQ at the end of the entire transfer because
-	 * MCI_DATABLOCKEND is broken.
-	 *
-	 * In the U300, the IRQs can arrive out-of-order,
-	 * e.g. MCI_DATABLOCKEND sometimes arrives after MCI_DATAEND,
-	 * so for this case we use the flags "blockend" and
-	 * "dataend" to make sure both IRQs have arrived before
-	 * concluding the transaction. (This does not apply
-	 * to the Ux500 which doesn't fire MCI_DATABLOCKEND
-	 * at all.) In DMA mode it suffers from the same problem
-	 * as the Ux500.
-	 */
-	if (status & MCI_DATABLOCKEND) {
-		/*
-		 * Just being a little over-cautious, we do not
-		 * use this progressive update if the hardware blockend
-		 * flag is unreliable: since it can stay high between
-		 * IRQs it will corrupt the transfer counter.
-		 */
-		if (!variant->broken_blockend)
-			host->data_xfered += data->blksz;
-		host->blockend = true;
-	}
+	if (status & MCI_DATABLOCKEND)
+		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");
 
-	if (status & MCI_DATAEND)
-		host->dataend = true;
-
-	/*
-	 * On variants with broken blockend we shall only wait for dataend,
-	 * on others we must sync with the blockend signal since they can
-	 * appear out-of-order.
-	 */
-	if (host->dataend && (host->blockend || variant->broken_blockend)) {
+	if (status & MCI_DATAEND || data->error) {
 		mmci_stop_data(host);
 
-		/* Reset these flags */
-		host->blockend = false;
-		host->dataend = false;
-
-		/*
-		 * Variants with broken blockend flags need to handle the
-		 * end of the entire transfer here.
-		 */
-		if (variant->broken_blockend && !data->error)
+		if (!data->error)
+			/* The error clause is handled above, success! */
 			host->data_xfered += data->blksz * data->blocks;
 
 		if (!data->stop) {
@@ -394,15 +343,15 @@
 
 	host->cmd = NULL;
 
-	cmd->resp[0] = readl(base + MMCIRESPONSE0);
-	cmd->resp[1] = readl(base + MMCIRESPONSE1);
-	cmd->resp[2] = readl(base + MMCIRESPONSE2);
-	cmd->resp[3] = readl(base + MMCIRESPONSE3);
-
 	if (status & MCI_CMDTIMEOUT) {
 		cmd->error = -ETIMEDOUT;
 	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
 		cmd->error = -EILSEQ;
+	} else {
+		cmd->resp[0] = readl(base + MMCIRESPONSE0);
+		cmd->resp[1] = readl(base + MMCIRESPONSE1);
+		cmd->resp[2] = readl(base + MMCIRESPONSE2);
+		cmd->resp[3] = readl(base + MMCIRESPONSE3);
 	}
 
 	if (!cmd->data || cmd->error) {
@@ -770,7 +719,6 @@
 	struct variant_data *variant = id->data;
 	struct mmci_host *host;
 	struct mmc_host *mmc;
-	unsigned int mask;
 	int ret;
 
 	/* must have platform data */
@@ -951,12 +899,7 @@
 			goto irq0_free;
 	}
 
-	mask = MCI_IRQENABLE;
-	/* Don't use the datablockend flag if it's broken */
-	if (variant->broken_blockend)
-		mask &= ~MCI_DATABLOCKEND;
-
-	writel(mask, host->base + MMCIMASK0);
+	writel(MCI_IRQENABLE, host->base + MMCIMASK0);
 
 	amba_set_drvdata(dev, mmc);
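
To make the new transfer accounting concrete, a worked example with hypothetical numbers: with blksz = 512 and blocks = 8 the request covers 4096 bytes. If a CRC error is raised while MMCIDATACNT still reads 1300 bytes remaining, then success = 4096 - 1300 = 2796 and data_xfered = round_down(2796 - 1, 512) = 2560, i.e. only the five blocks completed before the failing block are reported as transferred. round_down() lives in <linux/kernel.h>, which is why the mmci.c hunk adds that include.
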
 
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index df06f01..c1df7b8 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -137,7 +137,7 @@
 #define MCI_IRQENABLE	\
 	(MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK|	\
 	MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|	\
-	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)
+	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK)
 
 /* These interrupts are directed to IRQ1 when two IRQ lines are available */
 #define MCI_IRQ1MASK \
@@ -177,9 +177,6 @@
 	struct timer_list	timer;
 	unsigned int		oldstat;
 
-	bool			blockend;
-	bool			dataend;
-
 	/* pio stuff */
 	struct sg_mapping_iter	sg_miter;
 	unsigned int		size;
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 5decfd0..153ab97 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -383,14 +383,30 @@
 	host->curr.user_pages = 0;
 
 	box = &nc->cmd[0];
-	for (i = 0; i < host->dma.num_ents; i++) {
+
+	/* location of command block must be 64 bit aligned */
+	BUG_ON(host->dma.cmd_busaddr & 0x07);
+
+	nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
+	host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
+			       DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
+	host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
+
+	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
+			host->dma.num_ents, host->dma.dir);
+	if (n == 0) {
+		printk(KERN_ERR "%s: Unable to map in all sg elements\n",
+			mmc_hostname(host->mmc));
+		host->dma.sg = NULL;
+		host->dma.num_ents = 0;
+		return -ENOMEM;
+	}
+
+	for_each_sg(host->dma.sg, sg, n, i) {
+
 		box->cmd = CMD_MODE_BOX;
 
-	/* Initialize sg dma address */
-	sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg))
-				+ sg->offset;
-
-	if (i == (host->dma.num_ents - 1))
+		if (i == n - 1)
 			box->cmd |= CMD_LC;
 		rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
 			(sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
@@ -418,27 +434,6 @@
 			box->cmd |= CMD_DST_CRCI(crci);
 		}
 		box++;
-		sg++;
-	}
-
-	/* location of command block must be 64 bit aligned */
-	BUG_ON(host->dma.cmd_busaddr & 0x07);
-
-	nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
-	host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
-			       DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
-	host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
-
-	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
-			host->dma.num_ents, host->dma.dir);
-/* dsb inside dma_map_sg will write nc out to mem as well */
-
-	if (n != host->dma.num_ents) {
-		printk(KERN_ERR "%s: Unable to map in all sg elements\n",
-			mmc_hostname(host->mmc));
-		host->dma.sg = NULL;
-		host->dma.num_ents = 0;
-		return -ENOMEM;
 	}
 
 	return 0;
@@ -1331,9 +1326,6 @@
 	if (host->timer.function)
 		pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
 
-#if BUSCLK_PWRSAVE
-	msmsdcc_disable_clocks(host, 1);
-#endif
 	return 0;
  cmd_irq_free:
 	free_irq(cmd_irqres->start, host);
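
The msm_sdcc hunk above maps the scatterlist before walking it. A sketch of that dma_map_sg()/for_each_sg() ordering, assuming a hypothetical setup_descriptor() helper standing in for the driver's box-command setup:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* hypothetical stand-in for the per-entry descriptor setup */
extern void setup_descriptor(dma_addr_t addr, unsigned int len, int last);

static int map_and_build(struct device *dev, struct scatterlist *sgl,
			 int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i, n;

	n = dma_map_sg(dev, sgl, nents, dir);
	if (n == 0)
		return -ENOMEM;		/* nothing could be mapped */

	/* sg_dma_address()/sg_dma_len() are only valid after dma_map_sg(),
	 * which is why the patch now maps before building descriptors */
	for_each_sg(sgl, sg, n, i)
		setup_descriptor(sg_dma_address(sg), sg_dma_len(sg),
				 i == n - 1);

	return 0;
}
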
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 1720358..5309ab9 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -277,10 +277,43 @@
 	host->clock = clock;
 }
 
+/**
+ * sdhci_s3c_platform_8bit_width - support 8bit buswidth
+ * @host: The SDHCI host being queried
+ * @width: MMC_BUS_WIDTH_ macro for the bus width being requested
+ *
+ * We have 8-bit width support but is not a v3 controller.
+ * So we add platform_8bit_width() and support 8bit width.
+ */
+static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
+{
+	u8 ctrl;
+
+	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+	switch (width) {
+	case MMC_BUS_WIDTH_8:
+		ctrl |= SDHCI_CTRL_8BITBUS;
+		ctrl &= ~SDHCI_CTRL_4BITBUS;
+		break;
+	case MMC_BUS_WIDTH_4:
+		ctrl |= SDHCI_CTRL_4BITBUS;
+		ctrl &= ~SDHCI_CTRL_8BITBUS;
+		break;
+	default:
+		break;
+	}
+
+	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+	return 0;
+}
+
 static struct sdhci_ops sdhci_s3c_ops = {
 	.get_max_clock		= sdhci_s3c_get_max_clk,
 	.set_clock		= sdhci_s3c_set_clock,
 	.get_min_clock		= sdhci_s3c_get_min_clock,
+	.platform_8bit_width	= sdhci_s3c_platform_8bit_width,
 };
 
 static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
@@ -473,6 +506,9 @@
 	if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
 		host->mmc->caps = MMC_CAP_NONREMOVABLE;
 
+	if (pdata->host_caps)
+		host->mmc->caps |= pdata->host_caps;
+
 	host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
 			 SDHCI_QUIRK_32BIT_DMA_SIZE);
 
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index f8f65df..f08f944 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -19,7 +19,6 @@
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/kernel.h>
-#include <linux/usb.h>
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/mmc/host.h>
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index d9d7efb..6322d1f 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -930,7 +930,7 @@
 
 	init_completion(&dev->dma_done);
 
-	dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+	dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
 
 	if (!dev->card_workqueue)
 		goto error9;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 67822cf..ac0d6a8 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1258,7 +1258,7 @@
 static __init int sm_module_init(void)
 {
 	int error = 0;
-	cache_flush_workqueue = create_freezeable_workqueue("smflush");
+	cache_flush_workqueue = create_freezable_workqueue("smflush");
 
 	if (IS_ERR(cache_flush_workqueue))
 		return PTR_ERR(cache_flush_workqueue);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index f49e49d..5ebe280 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -672,33 +672,7 @@
 		ubi->nor_flash = 1;
 	}
 
-	/*
-	 * Set UBI min. I/O size (@ubi->min_io_size). We use @mtd->writebufsize
-	 * for these purposes, not @mtd->writesize. At the moment this does not
-	 * matter for NAND, because currently @mtd->writebufsize is equivalent to
-	 * @mtd->writesize for all NANDs. However, some CFI NOR flashes may
-	 * have @mtd->writebufsize which is multiple of @mtd->writesize.
-	 *
-	 * The reason we use @mtd->writebufsize for @ubi->min_io_size is that
-	 * UBI and UBIFS recovery algorithms rely on the fact that if there was
-	 * an unclean power cut, then we can find offset of the last corrupted
-	 * node, align the offset to @ubi->min_io_size, read the rest of the
-	 * eraseblock starting from this offset, and check whether there are
-	 * only 0xFF bytes. If yes, then we are probably dealing with a
-	 * corruption caused by a power cut, if not, then this is probably some
-	 * severe corruption.
-	 *
-	 * Thus, we have to use the maximum write unit size of the flash, which
-	 * is @mtd->writebufsize, because @mtd->writesize is the minimum write
-	 * size, not the maximum.
-	 */
-	if (ubi->mtd->type == MTD_NANDFLASH)
-		ubi_assert(ubi->mtd->writebufsize == ubi->mtd->writesize);
-	else if (ubi->mtd->type == MTD_NORFLASH)
-		ubi_assert(ubi->mtd->writebufsize % ubi->mtd->writesize == 0);
-
-	ubi->min_io_size = ubi->mtd->writebufsize;
-
+	ubi->min_io_size = ubi->mtd->writesize;
 	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
 
 	/*
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 16fe4f9..f4b3927 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -238,8 +238,8 @@
 config AX88796
 	tristate "ASIX AX88796 NE2000 clone support"
 	depends on ARM || MIPS || SUPERH
-	select CRC32
-	select MII
+	select PHYLIB
+	select MDIO_BITBANG
 	help
 	  AX88796 driver, using platform bus to provide
 	  chip detection and resources
@@ -1944,7 +1944,8 @@
 config FEC
 	bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
 	depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
-		MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
+		IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC
+	default IMX_HAVE_PLATFORM_FEC || MXS_HAVE_PLATFORM_FEC if ARM
 	select PHYLIB
 	help
 	  Say Y here if you want to use the built-in 10/100 Fast ethernet
@@ -2864,7 +2865,7 @@
 	default n
 
 config MLX4_DEBUG
-	bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED)
+	bool "Verbose debugging output" if (MLX4_CORE && EXPERT)
 	depends on MLX4_CORE
 	default y
 	---help---
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 62d6f88..aa07657 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -1644,7 +1644,7 @@
 module_init(ks8695_init);
 module_exit(ks8695_cleanup);
 
-MODULE_AUTHOR("Simtec Electronics")
+MODULE_AUTHOR("Simtec Electronics");
 MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:" MODULENAME);
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 1bf6720..23f2ab0 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -345,7 +345,7 @@
  */
 static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
 {
-	u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_SPEED_MASK;
+	u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL;
 	u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP &
 				~GIGA_CR_1000T_SPEED_MASK;
 
@@ -373,7 +373,7 @@
 	}
 
 	if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 ||
-	    atl1c_write_phy_reg(hw, MII_GIGA_CR, mii_giga_ctrl_data) != 0)
+	    atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0)
 		return -1;
 	return 0;
 }
@@ -517,19 +517,18 @@
 					"Error Setting up Auto-Negotiation\n");
 			return ret_val;
 		}
-		mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+		mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
 		break;
 	case MEDIA_TYPE_100M_FULL:
-		mii_bmcr_data |= BMCR_SPEED_100 | BMCR_FULL_DUPLEX;
+		mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX;
 		break;
 	case MEDIA_TYPE_100M_HALF:
-		mii_bmcr_data |= BMCR_SPEED_100;
+		mii_bmcr_data |= BMCR_SPEED100;
 		break;
 	case MEDIA_TYPE_10M_FULL:
-		mii_bmcr_data |= BMCR_SPEED_10 | BMCR_FULL_DUPLEX;
+		mii_bmcr_data |= BMCR_FULLDPLX;
 		break;
 	case MEDIA_TYPE_10M_HALF:
-		mii_bmcr_data |= BMCR_SPEED_10;
 		break;
 	default:
 		if (netif_msg_link(adapter))
@@ -657,7 +656,7 @@
 	err = atl1c_phy_setup_adv(hw);
 	if (err)
 		return err;
-	mii_bmcr_data |= BMCR_AUTO_NEG_EN | BMCR_RESTART_AUTO_NEG;
+	mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART;
 
 	return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
 }
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index 3dd6759..655fc6c 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -736,55 +736,16 @@
 #define REG_DEBUG_DATA0 		0x1900
 #define REG_DEBUG_DATA1 		0x1904
 
-/* PHY Control Register */
-#define MII_BMCR			0x00
-#define BMCR_SPEED_SELECT_MSB		0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_COLL_TEST_ENABLE		0x0080  /* Collision test enable */
-#define BMCR_FULL_DUPLEX		0x0100  /* FDX =1, half duplex =0 */
-#define BMCR_RESTART_AUTO_NEG		0x0200  /* Restart auto negotiation */
-#define BMCR_ISOLATE			0x0400  /* Isolate PHY from MII */
-#define BMCR_POWER_DOWN			0x0800  /* Power down */
-#define BMCR_AUTO_NEG_EN		0x1000  /* Auto Neg Enable */
-#define BMCR_SPEED_SELECT_LSB		0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define BMCR_LOOPBACK			0x4000  /* 0 = normal, 1 = loopback */
-#define BMCR_RESET			0x8000  /* 0 = normal, 1 = PHY reset */
-#define BMCR_SPEED_MASK			0x2040
-#define BMCR_SPEED_1000			0x0040
-#define BMCR_SPEED_100			0x2000
-#define BMCR_SPEED_10			0x0000
-
-/* PHY Status Register */
-#define MII_BMSR			0x01
-#define BMMSR_EXTENDED_CAPS		0x0001  /* Extended register capabilities */
-#define BMSR_JABBER_DETECT		0x0002  /* Jabber Detected */
-#define BMSR_LINK_STATUS		0x0004  /* Link Status 1 = link */
-#define BMSR_AUTONEG_CAPS		0x0008  /* Auto Neg Capable */
-#define BMSR_REMOTE_FAULT		0x0010  /* Remote Fault Detect */
-#define BMSR_AUTONEG_COMPLETE		0x0020  /* Auto Neg Complete */
-#define BMSR_PREAMBLE_SUPPRESS		0x0040  /* Preamble may be suppressed */
-#define BMSR_EXTENDED_STATUS		0x0100  /* Ext. status info in Reg 0x0F */
-#define BMSR_100T2_HD_CAPS		0x0200  /* 100T2 Half Duplex Capable */
-#define BMSR_100T2_FD_CAPS		0x0400  /* 100T2 Full Duplex Capable */
-#define BMSR_10T_HD_CAPS		0x0800  /* 10T   Half Duplex Capable */
-#define BMSR_10T_FD_CAPS		0x1000  /* 10T   Full Duplex Capable */
-#define BMSR_100X_HD_CAPS		0x2000  /* 100X  Half Duplex Capable */
-#define BMMII_SR_100X_FD_CAPS		0x4000  /* 100X  Full Duplex Capable */
-#define BMMII_SR_100T4_CAPS		0x8000  /* 100T4 Capable */
-
-#define MII_PHYSID1			0x02
-#define MII_PHYSID2			0x03
 #define L1D_MPW_PHYID1			0xD01C  /* V7 */
 #define L1D_MPW_PHYID2			0xD01D  /* V1-V6 */
 #define L1D_MPW_PHYID3			0xD01E  /* V8 */
 
 
 /* Autoneg Advertisement Register */
-#define MII_ADVERTISE			0x04
-#define ADVERTISE_SPEED_MASK		0x01E0
-#define ADVERTISE_DEFAULT_CAP		0x0DE0
+#define ADVERTISE_DEFAULT_CAP \
+	(ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)
 
 /* 1000BASE-T Control Register */
-#define MII_GIGA_CR			0x09
 #define GIGA_CR_1000T_REPEATER_DTE	0x0400  /* 1=Repeater/switch device port 0=DTE device */
 
 #define GIGA_CR_1000T_MS_VALUE		0x0800  /* 1=Configure PHY as Master 0=Configure PHY as Slave */
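
The atl1c/atl1e hunks replace driver-private PHY register definitions with the generic ones from <linux/mii.h>. A small sketch of composing the same values with those generic macros; the foo_ helpers are illustrative only:

#include <linux/mii.h>
#include <linux/types.h>

/* ADVERTISE register value equivalent to the new ADVERTISE_DEFAULT_CAP:
 * all 10/100 modes plus symmetric and asymmetric pause */
static inline u16 foo_build_adv(void)
{
	return ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
}

/* kick off autonegotiation using the generic BMCR bit names */
static inline u16 foo_restart_aneg(u16 bmcr)
{
	return bmcr | BMCR_ANENABLE | BMCR_ANRESTART;
}
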
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index a699bbf..e60595f 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -48,6 +48,7 @@
 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
 	/* required last entry */
 	{ 0 }
 };
@@ -2717,7 +2718,6 @@
 		goto err_reset;
 	}
 
-	device_init_wakeup(&pdev->dev, 1);
 	/* reset the controller to
 	 * put the device in a known good starting state */
 	err = atl1c_phy_init(&adapter->hw);
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 6943a6c..1209297 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -95,18 +95,18 @@
 		ecmd->advertising = hw->autoneg_advertised |
 				    ADVERTISED_TP | ADVERTISED_Autoneg;
 
-		adv4 = hw->mii_autoneg_adv_reg & ~MII_AR_SPEED_MASK;
+		adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL;
 		adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK;
 		if (hw->autoneg_advertised & ADVERTISE_10_HALF)
-			adv4 |= MII_AR_10T_HD_CAPS;
+			adv4 |= ADVERTISE_10HALF;
 		if (hw->autoneg_advertised & ADVERTISE_10_FULL)
-			adv4 |= MII_AR_10T_FD_CAPS;
+			adv4 |= ADVERTISE_10FULL;
 		if (hw->autoneg_advertised & ADVERTISE_100_HALF)
-			adv4 |= MII_AR_100TX_HD_CAPS;
+			adv4 |= ADVERTISE_100HALF;
 		if (hw->autoneg_advertised & ADVERTISE_100_FULL)
-			adv4 |= MII_AR_100TX_FD_CAPS;
+			adv4 |= ADVERTISE_100FULL;
 		if (hw->autoneg_advertised & ADVERTISE_1000_FULL)
-			adv9 |= MII_AT001_CR_1000T_FD_CAPS;
+			adv9 |= ADVERTISE_1000FULL;
 
 		if (adv4 != hw->mii_autoneg_adv_reg ||
 				adv9 != hw->mii_1000t_ctrl_reg) {
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 76cc043..923063d 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -318,7 +318,7 @@
 	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
 	 * the  1000Base-T control Register (Address 9).
 	 */
-	mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+	mii_autoneg_adv_reg &= ~ADVERTISE_ALL;
 	mii_1000t_ctrl_reg  &= ~MII_AT001_CR_1000T_SPEED_MASK;
 
 	/*
@@ -327,44 +327,37 @@
 	 */
 	switch (hw->media_type) {
 	case MEDIA_TYPE_AUTO_SENSOR:
-		mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS   |
-					MII_AR_10T_FD_CAPS   |
-					MII_AR_100TX_HD_CAPS |
-					MII_AR_100TX_FD_CAPS);
-		hw->autoneg_advertised = ADVERTISE_10_HALF  |
-					 ADVERTISE_10_FULL  |
-					 ADVERTISE_100_HALF |
-					 ADVERTISE_100_FULL;
+		mii_autoneg_adv_reg |= ADVERTISE_ALL;
+		hw->autoneg_advertised = ADVERTISE_ALL;
 		if (hw->nic_type == athr_l1e) {
-			mii_1000t_ctrl_reg |=
-				MII_AT001_CR_1000T_FD_CAPS;
+			mii_1000t_ctrl_reg |= ADVERTISE_1000FULL;
 			hw->autoneg_advertised |= ADVERTISE_1000_FULL;
 		}
 		break;
 
 	case MEDIA_TYPE_100M_FULL:
-		mii_autoneg_adv_reg   |= MII_AR_100TX_FD_CAPS;
+		mii_autoneg_adv_reg   |= ADVERTISE_100FULL;
 		hw->autoneg_advertised = ADVERTISE_100_FULL;
 		break;
 
 	case MEDIA_TYPE_100M_HALF:
-		mii_autoneg_adv_reg   |= MII_AR_100TX_HD_CAPS;
+		mii_autoneg_adv_reg   |= ADVERTISE_100_HALF;
 		hw->autoneg_advertised = ADVERTISE_100_HALF;
 		break;
 
 	case MEDIA_TYPE_10M_FULL:
-		mii_autoneg_adv_reg   |= MII_AR_10T_FD_CAPS;
+		mii_autoneg_adv_reg   |= ADVERTISE_10_FULL;
 		hw->autoneg_advertised = ADVERTISE_10_FULL;
 		break;
 
 	default:
-		mii_autoneg_adv_reg   |= MII_AR_10T_HD_CAPS;
+		mii_autoneg_adv_reg   |= ADVERTISE_10_HALF;
 		hw->autoneg_advertised = ADVERTISE_10_HALF;
 		break;
 	}
 
 	/* flow control fixed to enable all */
-	mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+	mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
 
 	hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
 	hw->mii_1000t_ctrl_reg  = mii_1000t_ctrl_reg;
@@ -374,7 +367,7 @@
 		return ret_val;
 
 	if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
-		ret_val = atl1e_write_phy_reg(hw, MII_AT001_CR,
+		ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000,
 					   mii_1000t_ctrl_reg);
 		if (ret_val)
 			return ret_val;
@@ -397,7 +390,7 @@
 	int ret_val;
 	u16 phy_data;
 
-	phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
+	phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
 
 	ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data);
 	if (ret_val) {
@@ -645,15 +638,14 @@
 		return err;
 
 	if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) {
-		err = atl1e_write_phy_reg(hw, MII_AT001_CR,
+		err = atl1e_write_phy_reg(hw, MII_CTRL1000,
 				       hw->mii_1000t_ctrl_reg);
 		if (err)
 			return err;
 	}
 
 	err = atl1e_write_phy_reg(hw, MII_BMCR,
-			MII_CR_RESET | MII_CR_AUTO_NEG_EN |
-			MII_CR_RESTART_AUTO_NEG);
+			BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
 	return err;
 }
 
diff --git a/drivers/net/atl1e/atl1e_hw.h b/drivers/net/atl1e/atl1e_hw.h
index 5ea2f4d..74df16a 100644
--- a/drivers/net/atl1e/atl1e_hw.h
+++ b/drivers/net/atl1e/atl1e_hw.h
@@ -629,127 +629,24 @@
 
 /***************************** MII definition ***************************************/
 /* PHY Common Register */
-#define MII_BMCR                        0x00
-#define MII_BMSR                        0x01
-#define MII_PHYSID1                     0x02
-#define MII_PHYSID2                     0x03
-#define MII_ADVERTISE                   0x04
-#define MII_LPA                         0x05
-#define MII_EXPANSION                   0x06
-#define MII_AT001_CR                    0x09
-#define MII_AT001_SR                    0x0A
-#define MII_AT001_ESR                   0x0F
 #define MII_AT001_PSCR                  0x10
 #define MII_AT001_PSSR                  0x11
 #define MII_INT_CTRL                    0x12
 #define MII_INT_STATUS                  0x13
 #define MII_SMARTSPEED                  0x14
-#define MII_RERRCOUNTER                 0x15
-#define MII_SREVISION                   0x16
-#define MII_RESV1                       0x17
 #define MII_LBRERROR                    0x18
-#define MII_PHYADDR                     0x19
 #define MII_RESV2                       0x1a
-#define MII_TPISTATUS                   0x1b
-#define MII_NCONFIG                     0x1c
 
 #define MII_DBG_ADDR			0x1D
 #define MII_DBG_DATA			0x1E
 
-
-/* PHY Control Register */
-#define MII_CR_SPEED_SELECT_MSB                  0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_COLL_TEST_ENABLE                  0x0080  /* Collision test enable */
-#define MII_CR_FULL_DUPLEX                       0x0100  /* FDX =1, half duplex =0 */
-#define MII_CR_RESTART_AUTO_NEG                  0x0200  /* Restart auto negotiation */
-#define MII_CR_ISOLATE                           0x0400  /* Isolate PHY from MII */
-#define MII_CR_POWER_DOWN                        0x0800  /* Power down */
-#define MII_CR_AUTO_NEG_EN                       0x1000  /* Auto Neg Enable */
-#define MII_CR_SPEED_SELECT_LSB                  0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
-#define MII_CR_LOOPBACK                          0x4000  /* 0 = normal, 1 = loopback */
-#define MII_CR_RESET                             0x8000  /* 0 = normal, 1 = PHY reset */
-#define MII_CR_SPEED_MASK                        0x2040
-#define MII_CR_SPEED_1000                        0x0040
-#define MII_CR_SPEED_100                         0x2000
-#define MII_CR_SPEED_10                          0x0000
-
-
-/* PHY Status Register */
-#define MII_SR_EXTENDED_CAPS                     0x0001  /* Extended register capabilities */
-#define MII_SR_JABBER_DETECT                     0x0002  /* Jabber Detected */
-#define MII_SR_LINK_STATUS                       0x0004  /* Link Status 1 = link */
-#define MII_SR_AUTONEG_CAPS                      0x0008  /* Auto Neg Capable */
-#define MII_SR_REMOTE_FAULT                      0x0010  /* Remote Fault Detect */
-#define MII_SR_AUTONEG_COMPLETE                  0x0020  /* Auto Neg Complete */
-#define MII_SR_PREAMBLE_SUPPRESS                 0x0040  /* Preamble may be suppressed */
-#define MII_SR_EXTENDED_STATUS                   0x0100  /* Ext. status info in Reg 0x0F */
-#define MII_SR_100T2_HD_CAPS                     0x0200  /* 100T2 Half Duplex Capable */
-#define MII_SR_100T2_FD_CAPS                     0x0400  /* 100T2 Full Duplex Capable */
-#define MII_SR_10T_HD_CAPS                       0x0800  /* 10T   Half Duplex Capable */
-#define MII_SR_10T_FD_CAPS                       0x1000  /* 10T   Full Duplex Capable */
-#define MII_SR_100X_HD_CAPS                      0x2000  /* 100X  Half Duplex Capable */
-#define MII_SR_100X_FD_CAPS                      0x4000  /* 100X  Full Duplex Capable */
-#define MII_SR_100T4_CAPS                        0x8000  /* 100T4 Capable */
-
-/* Link partner ability register. */
-#define MII_LPA_SLCT                             0x001f  /* Same as advertise selector  */
-#define MII_LPA_10HALF                           0x0020  /* Can do 10mbps half-duplex   */
-#define MII_LPA_10FULL                           0x0040  /* Can do 10mbps full-duplex   */
-#define MII_LPA_100HALF                          0x0080  /* Can do 100mbps half-duplex  */
-#define MII_LPA_100FULL                          0x0100  /* Can do 100mbps full-duplex  */
-#define MII_LPA_100BASE4                         0x0200  /* 100BASE-T4  */
-#define MII_LPA_PAUSE                            0x0400  /* PAUSE */
-#define MII_LPA_ASYPAUSE                         0x0800  /* Asymmetrical PAUSE */
-#define MII_LPA_RFAULT                           0x2000  /* Link partner faulted        */
-#define MII_LPA_LPACK                            0x4000  /* Link partner acked us       */
-#define MII_LPA_NPAGE                            0x8000  /* Next page bit               */
-
 /* Autoneg Advertisement Register */
-#define MII_AR_SELECTOR_FIELD                   0x0001  /* indicates IEEE 802.3 CSMA/CD */
-#define MII_AR_10T_HD_CAPS                      0x0020  /* 10T   Half Duplex Capable */
-#define MII_AR_10T_FD_CAPS                      0x0040  /* 10T   Full Duplex Capable */
-#define MII_AR_100TX_HD_CAPS                    0x0080  /* 100TX Half Duplex Capable */
-#define MII_AR_100TX_FD_CAPS                    0x0100  /* 100TX Full Duplex Capable */
-#define MII_AR_100T4_CAPS                       0x0200  /* 100T4 Capable */
-#define MII_AR_PAUSE                            0x0400  /* Pause operation desired */
-#define MII_AR_ASM_DIR                          0x0800  /* Asymmetric Pause Direction bit */
-#define MII_AR_REMOTE_FAULT                     0x2000  /* Remote Fault detected */
-#define MII_AR_NEXT_PAGE                        0x8000  /* Next Page ability supported */
-#define MII_AR_SPEED_MASK                       0x01E0
-#define MII_AR_DEFAULT_CAP_MASK                 0x0DE0
+#define MII_AR_DEFAULT_CAP_MASK                 0
 
 /* 1000BASE-T Control Register */
-#define MII_AT001_CR_1000T_HD_CAPS              0x0100  /* Advertise 1000T HD capability */
-#define MII_AT001_CR_1000T_FD_CAPS              0x0200  /* Advertise 1000T FD capability  */
-#define MII_AT001_CR_1000T_REPEATER_DTE         0x0400  /* 1=Repeater/switch device port */
-/* 0=DTE device */
-#define MII_AT001_CR_1000T_MS_VALUE             0x0800  /* 1=Configure PHY as Master */
-/* 0=Configure PHY as Slave */
-#define MII_AT001_CR_1000T_MS_ENABLE            0x1000  /* 1=Master/Slave manual config value */
-/* 0=Automatic Master/Slave config */
-#define MII_AT001_CR_1000T_TEST_MODE_NORMAL     0x0000  /* Normal Operation */
-#define MII_AT001_CR_1000T_TEST_MODE_1          0x2000  /* Transmit Waveform test */
-#define MII_AT001_CR_1000T_TEST_MODE_2          0x4000  /* Master Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_3          0x6000  /* Slave Transmit Jitter test */
-#define MII_AT001_CR_1000T_TEST_MODE_4          0x8000  /* Transmitter Distortion test */
-#define MII_AT001_CR_1000T_SPEED_MASK           0x0300
-#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK     0x0300
-
-/* 1000BASE-T Status Register */
-#define MII_AT001_SR_1000T_LP_HD_CAPS           0x0400  /* LP is 1000T HD capable */
-#define MII_AT001_SR_1000T_LP_FD_CAPS           0x0800  /* LP is 1000T FD capable */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS     0x1000  /* Remote receiver OK */
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS      0x2000  /* Local receiver OK */
-#define MII_AT001_SR_1000T_MS_CONFIG_RES        0x4000  /* 1=Local TX is Master, 0=Slave */
-#define MII_AT001_SR_1000T_MS_CONFIG_FAULT      0x8000  /* Master/Slave config fault */
-#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT   12
-#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT    13
-
-/* Extended Status Register */
-#define MII_AT001_ESR_1000T_HD_CAPS             0x1000  /* 1000T HD capable */
-#define MII_AT001_ESR_1000T_FD_CAPS             0x2000  /* 1000T FD capable */
-#define MII_AT001_ESR_1000X_HD_CAPS             0x4000  /* 1000X HD capable */
-#define MII_AT001_ESR_1000X_FD_CAPS             0x8000  /* 1000X FD capable */
+#define MII_AT001_CR_1000T_SPEED_MASK \
+	(ADVERTISE_1000FULL | ADVERTISE_1000HALF)
+#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK	MII_AT001_CR_1000T_SPEED_MASK
 
 /* AT001 PHY Specific Control Register */
 #define MII_AT001_PSCR_JABBER_DISABLE           0x0001  /* 1=Jabber Function disabled */
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index e28f8ba..bf7500c 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -2051,9 +2051,9 @@
 		atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
 		atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
 
-		mii_advertise_data = MII_AR_10T_HD_CAPS;
+		mii_advertise_data = ADVERTISE_10HALF;
 
-		if ((atl1e_write_phy_reg(hw, MII_AT001_CR, 0) != 0) ||
+		if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
 		    (atl1e_write_phy_reg(hw,
 			   MII_ADVERTISE, mii_advertise_data) != 0) ||
 		    (atl1e_phy_commit(hw)) != 0) {
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 3b52768..67f40b9 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -950,6 +950,7 @@
 	hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
 	adapter->wol = 0;
+	device_set_wakeup_enable(&adapter->pdev->dev, false);
 	adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
 	adapter->ict = 50000;		/* 100ms */
 	adapter->link_speed = SPEED_0;	/* hardware init */
@@ -2735,15 +2736,15 @@
 }
 
 #ifdef CONFIG_PM
-static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
+static int atl1_suspend(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct atl1_adapter *adapter = netdev_priv(netdev);
 	struct atl1_hw *hw = &adapter->hw;
 	u32 ctrl = 0;
 	u32 wufc = adapter->wol;
 	u32 val;
-	int retval;
 	u16 speed;
 	u16 duplex;
 
@@ -2751,17 +2752,15 @@
 	if (netif_running(netdev))
 		atl1_down(adapter);
 
-	retval = pci_save_state(pdev);
-	if (retval)
-		return retval;
-
 	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
 	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
 	val = ctrl & BMSR_LSTATUS;
 	if (val)
 		wufc &= ~ATLX_WUFC_LNKC;
+	if (!wufc)
+		goto disable_wol;
 
-	if (val && wufc) {
+	if (val) {
 		val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
 		if (val) {
 			if (netif_msg_ifdown(adapter))
@@ -2798,23 +2797,18 @@
 		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
 		iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
 		ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
-
-		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
-		goto exit;
-	}
-
-	if (!val && wufc) {
+	} else {
 		ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
 		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
 		ioread32(hw->hw_addr + REG_WOL_CTRL);
 		iowrite32(0, hw->hw_addr + REG_MAC_CTRL);
 		ioread32(hw->hw_addr + REG_MAC_CTRL);
 		hw->phy_configured = false;
-		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
-		goto exit;
 	}
 
-disable_wol:
+	return 0;
+
+ disable_wol:
 	iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
 	ioread32(hw->hw_addr + REG_WOL_CTRL);
 	ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
@@ -2822,37 +2816,17 @@
 	iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC);
 	ioread32(hw->hw_addr + REG_PCIE_PHYMISC);
 	hw->phy_configured = false;
-	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
-exit:
-	if (netif_running(netdev))
-		pci_disable_msi(adapter->pdev);
-	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
 	return 0;
 }
 
-static int atl1_resume(struct pci_dev *pdev)
+static int atl1_resume(struct device *dev)
 {
+	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct atl1_adapter *adapter = netdev_priv(netdev);
-	u32 err;
 
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-
-	err = pci_enable_device(pdev);
-	if (err) {
-		if (netif_msg_ifup(adapter))
-			dev_printk(KERN_DEBUG, &pdev->dev,
-				"error enabling pci device\n");
-		return err;
-	}
-
-	pci_set_master(pdev);
 	iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
-	pci_enable_wake(pdev, PCI_D3hot, 0);
-	pci_enable_wake(pdev, PCI_D3cold, 0);
 
 	atl1_reset_hw(&adapter->hw);
 
@@ -2864,16 +2838,25 @@
 
 	return 0;
 }
+
+static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume);
+#define ATL1_PM_OPS	(&atl1_pm_ops)
+
 #else
-#define atl1_suspend NULL
-#define atl1_resume NULL
+
+static int atl1_suspend(struct device *dev) { return 0; }
+
+#define ATL1_PM_OPS	NULL
 #endif
 
 static void atl1_shutdown(struct pci_dev *pdev)
 {
-#ifdef CONFIG_PM
-	atl1_suspend(pdev, PMSG_SUSPEND);
-#endif
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl1_adapter *adapter = netdev_priv(netdev);
+
+	atl1_suspend(&pdev->dev);
+	pci_wake_from_d3(pdev, adapter->wol);
+	pci_set_power_state(pdev, PCI_D3hot);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3117,9 +3100,8 @@
 	.id_table = atl1_pci_tbl,
 	.probe = atl1_probe,
 	.remove = __devexit_p(atl1_remove),
-	.suspend = atl1_suspend,
-	.resume = atl1_resume,
-	.shutdown = atl1_shutdown
+	.shutdown = atl1_shutdown,
+	.driver.pm = ATL1_PM_OPS,
 };
 
 /*
@@ -3409,6 +3391,9 @@
 	adapter->wol = 0;
 	if (wol->wolopts & WAKE_MAGIC)
 		adapter->wol |= ATLX_WUFC_MAG;
+
+	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
 	return 0;
 }
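
The atl1 hunk above converts legacy pci_driver suspend/resume callbacks to dev_pm_ops. A minimal sketch of the same shape for a hypothetical driver, assuming nothing about atl1 beyond what the diff shows:

#include <linux/pci.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	dev_dbg(&pdev->dev, "suspending\n");
	/* quiesce the device; with dev_pm_ops the PCI core saves state,
	 * picks the power state and arms wake-up itself */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* reinitialise the hardware */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
#define FOO_PM_OPS	(&foo_pm_ops)
#else
#define FOO_PM_OPS	NULL
#endif

static struct pci_driver foo_driver = {
	.name		= "foo",
	/* ... probe/remove/id_table ... */
	.driver.pm	= FOO_PM_OPS,
};
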
 
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 4bebff3..e7cb8c8 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -9,7 +9,7 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
-*/
+ */
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -17,46 +17,45 @@
 #include <linux/isapnp.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/timer.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/mii.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/phy.h>
 #include <linux/eeprom_93cx6.h>
 #include <linux/slab.h>
 
 #include <net/ax88796.h>
 
 #include <asm/system.h>
-#include <asm/io.h>
-
-static int phy_debug = 0;
 
 /* Rename the lib8390.c functions to show that they are in this driver */
-#define __ei_open       ax_ei_open
-#define __ei_close      ax_ei_close
-#define __ei_poll	ax_ei_poll
+#define __ei_open ax_ei_open
+#define __ei_close ax_ei_close
+#define __ei_poll ax_ei_poll
 #define __ei_start_xmit ax_ei_start_xmit
 #define __ei_tx_timeout ax_ei_tx_timeout
-#define __ei_get_stats  ax_ei_get_stats
+#define __ei_get_stats ax_ei_get_stats
 #define __ei_set_multicast_list ax_ei_set_multicast_list
-#define __ei_interrupt  ax_ei_interrupt
+#define __ei_interrupt ax_ei_interrupt
 #define ____alloc_ei_netdev ax__alloc_ei_netdev
-#define __NS8390_init   ax_NS8390_init
+#define __NS8390_init ax_NS8390_init
 
 /* force unsigned long back to 'void __iomem *' */
 #define ax_convert_addr(_a) ((void __force __iomem *)(_a))
 
-#define ei_inb(_a)	readb(ax_convert_addr(_a))
+#define ei_inb(_a) readb(ax_convert_addr(_a))
 #define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a))
 
-#define ei_inb_p(_a)	ei_inb(_a)
+#define ei_inb_p(_a) ei_inb(_a)
 #define ei_outb_p(_v, _a) ei_outb(_v, _a)
 
 /* define EI_SHIFT() to take into account our register offsets */
-#define EI_SHIFT(x)     (ei_local->reg_offset[(x)])
+#define EI_SHIFT(x) (ei_local->reg_offset[(x)])
 
 /* Ensure we have our RCR base value */
 #define AX88796_PLATFORM
@@ -74,43 +73,46 @@
 #define NE_DATAPORT	EI_SHIFT(0x10)
 
 #define NE1SM_START_PG	0x20	/* First page of TX buffer */
-#define NE1SM_STOP_PG 	0x40	/* Last page +1 of RX ring */
+#define NE1SM_STOP_PG	0x40	/* Last page +1 of RX ring */
 #define NESM_START_PG	0x40	/* First page of TX buffer */
 #define NESM_STOP_PG	0x80	/* Last page +1 of RX ring */
 
+#define AX_GPOC_PPDSET	BIT(6)
+
 /* device private data */
 
 struct ax_device {
-	struct timer_list	 mii_timer;
-	spinlock_t		 mii_lock;
-	struct mii_if_info	 mii;
+	struct mii_bus *mii_bus;
+	struct mdiobb_ctrl bb_ctrl;
+	struct phy_device *phy_dev;
+	void __iomem *addr_memr;
+	u8 reg_memr;
+	int link;
+	int speed;
+	int duplex;
 
-	u32			 msg_enable;
-	void __iomem		*map2;
-	struct platform_device	*dev;
-	struct resource		*mem;
-	struct resource		*mem2;
-	struct ax_plat_data	*plat;
+	void __iomem *map2;
+	const struct ax_plat_data *plat;
 
-	unsigned char		 running;
-	unsigned char		 resume_open;
-	unsigned int		 irqflags;
+	unsigned char running;
+	unsigned char resume_open;
+	unsigned int irqflags;
 
-	u32			 reg_offsets[0x20];
+	u32 reg_offsets[0x20];
 };
 
 static inline struct ax_device *to_ax_dev(struct net_device *dev)
 {
 	struct ei_device *ei_local = netdev_priv(dev);
-	return (struct ax_device *)(ei_local+1);
+	return (struct ax_device *)(ei_local + 1);
 }
 
-/* ax_initial_check
+/*
+ * ax_initial_check
  *
  * do an initial probe for the card to check whether it exists
  * and is functional
  */
-
 static int ax_initial_check(struct net_device *dev)
 {
 	struct ei_device *ei_local = netdev_priv(dev);
@@ -122,10 +124,10 @@
 	if (reg0 == 0xFF)
 		return -ENODEV;
 
-	ei_outb(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
+	ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD);
 	regd = ei_inb(ioaddr + 0x0d);
 	ei_outb(0xff, ioaddr + 0x0d);
-	ei_outb(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
+	ei_outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD);
 	ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
 	if (ei_inb(ioaddr + EN0_COUNTER0) != 0) {
 		ei_outb(reg0, ioaddr);
@@ -136,29 +138,28 @@
 	return 0;
 }
 
-/* Hard reset the card.  This used to pause for the same period that a
-   8390 reset command required, but that shouldn't be necessary. */
-
+/*
+ * Hard reset the card. This used to pause for the same period that an
+ * 8390 reset command required, but that shouldn't be necessary.
+ */
 static void ax_reset_8390(struct net_device *dev)
 {
 	struct ei_device *ei_local = netdev_priv(dev);
-	struct ax_device  *ax = to_ax_dev(dev);
 	unsigned long reset_start_time = jiffies;
 	void __iomem *addr = (void __iomem *)dev->base_addr;
 
 	if (ei_debug > 1)
-		dev_dbg(&ax->dev->dev, "resetting the 8390 t=%ld\n", jiffies);
+		netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies);
 
 	ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
 
-	ei_status.txing = 0;
-	ei_status.dmaing = 0;
+	ei_local->txing = 0;
+	ei_local->dmaing = 0;
 
 	/* This check _should_not_ be necessary, omit eventually. */
 	while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
-		if (jiffies - reset_start_time > 2*HZ/100) {
-			dev_warn(&ax->dev->dev, "%s: %s did not complete.\n",
-			       __func__, dev->name);
+		if (jiffies - reset_start_time > 2 * HZ / 100) {
+			netdev_warn(dev, "%s: did not complete.\n", __func__);
 			break;
 		}
 	}
@@ -171,70 +172,72 @@
 			    int ring_page)
 {
 	struct ei_device *ei_local = netdev_priv(dev);
-	struct ax_device  *ax = to_ax_dev(dev);
 	void __iomem *nic_base = ei_local->mem;
 
 	/* This *shouldn't* happen. If it does, it's the last thing you'll see */
-	if (ei_status.dmaing) {
-		dev_err(&ax->dev->dev, "%s: DMAing conflict in %s "
+	if (ei_local->dmaing) {
+		netdev_err(dev, "DMAing conflict in %s "
 			"[DMAstat:%d][irqlock:%d].\n",
-			dev->name, __func__,
-			ei_status.dmaing, ei_status.irqlock);
+			__func__,
+			ei_local->dmaing, ei_local->irqlock);
 		return;
 	}
 
-	ei_status.dmaing |= 0x01;
-	ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+	ei_local->dmaing |= 0x01;
+	ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD);
 	ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
 	ei_outb(0, nic_base + EN0_RCNTHI);
 	ei_outb(0, nic_base + EN0_RSARLO);		/* On page boundary */
 	ei_outb(ring_page, nic_base + EN0_RSARHI);
 	ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
 
-	if (ei_status.word16)
-		readsw(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+	if (ei_local->word16)
+		readsw(nic_base + NE_DATAPORT, hdr,
+		       sizeof(struct e8390_pkt_hdr) >> 1);
 	else
-		readsb(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
+		readsb(nic_base + NE_DATAPORT, hdr,
+		       sizeof(struct e8390_pkt_hdr));
 
 	ei_outb(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
-	ei_status.dmaing &= ~0x01;
+	ei_local->dmaing &= ~0x01;
 
 	le16_to_cpus(&hdr->count);
 }
 
 
-/* Block input and output, similar to the Crynwr packet driver.  If you
-   are porting to a new ethercard, look at the packet driver source for hints.
-   The NEx000 doesn't share the on-board packet memory -- you have to put
-   the packet out through the "remote DMA" dataport using ei_outb. */
-
+/*
+ * Block input and output, similar to the Crynwr packet driver. If
+ * you are porting to a new ethercard, look at the packet driver
+ * source for hints. The NEx000 doesn't share the on-board packet
+ * memory -- you have to put the packet out through the "remote DMA"
+ * dataport using ei_outb.
+ */
 static void ax_block_input(struct net_device *dev, int count,
 			   struct sk_buff *skb, int ring_offset)
 {
 	struct ei_device *ei_local = netdev_priv(dev);
-	struct ax_device  *ax = to_ax_dev(dev);
 	void __iomem *nic_base = ei_local->mem;
 	char *buf = skb->data;
 
-	if (ei_status.dmaing) {
-		dev_err(&ax->dev->dev,
-			"%s: DMAing conflict in %s "
+	if (ei_local->dmaing) {
+		netdev_err(dev,
+			"DMAing conflict in %s "
 			"[DMAstat:%d][irqlock:%d].\n",
-			dev->name, __func__,
-			ei_status.dmaing, ei_status.irqlock);
+			__func__,
+			ei_local->dmaing, ei_local->irqlock);
 		return;
 	}
 
-	ei_status.dmaing |= 0x01;
+	ei_local->dmaing |= 0x01;
 
-	ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+	ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD);
 	ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
 	ei_outb(count >> 8, nic_base + EN0_RCNTHI);
 	ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO);
 	ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI);
 	ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
 
-	if (ei_status.word16) {
+	if (ei_local->word16) {
 		readsw(nic_base + NE_DATAPORT, buf, count >> 1);
 		if (count & 0x01)
 			buf[count-1] = ei_inb(nic_base + NE_DATAPORT);
@@ -243,34 +246,34 @@
 		readsb(nic_base + NE_DATAPORT, buf, count);
 	}
 
-	ei_status.dmaing &= ~1;
+	ei_local->dmaing &= ~1;
 }
 
 static void ax_block_output(struct net_device *dev, int count,
 			    const unsigned char *buf, const int start_page)
 {
 	struct ei_device *ei_local = netdev_priv(dev);
-	struct ax_device  *ax = to_ax_dev(dev);
 	void __iomem *nic_base = ei_local->mem;
 	unsigned long dma_start;
 
-	/* Round the count up for word writes.  Do we need to do this?
-	   What effect will an odd byte count have on the 8390?
-	   I should check someday. */
-
-	if (ei_status.word16 && (count & 0x01))
+	/*
+	 * Round the count up for word writes. Do we need to do this?
+	 * What effect will an odd byte count have on the 8390?  I
+	 * should check someday.
+	 */
+	if (ei_local->word16 && (count & 0x01))
 		count++;
 
 	/* This *shouldn't* happen. If it does, it's the last thing you'll see */
-	if (ei_status.dmaing) {
-		dev_err(&ax->dev->dev, "%s: DMAing conflict in %s."
+	if (ei_local->dmaing) {
+		netdev_err(dev, "DMAing conflict in %s."
 			"[DMAstat:%d][irqlock:%d]\n",
-			dev->name, __func__,
-		       ei_status.dmaing, ei_status.irqlock);
+			__func__,
+		       ei_local->dmaing, ei_local->irqlock);
 		return;
 	}
 
-	ei_status.dmaing |= 0x01;
+	ei_local->dmaing |= 0x01;
 	/* We should already be in page 0, but to be safe... */
 	ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD);
 
@@ -278,250 +281,170 @@
 
 	/* Now the normal output. */
 	ei_outb(count & 0xff, nic_base + EN0_RCNTLO);
-	ei_outb(count >> 8,   nic_base + EN0_RCNTHI);
+	ei_outb(count >> 8, nic_base + EN0_RCNTHI);
 	ei_outb(0x00, nic_base + EN0_RSARLO);
 	ei_outb(start_page, nic_base + EN0_RSARHI);
 
 	ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
-	if (ei_status.word16) {
-		writesw(nic_base + NE_DATAPORT, buf, count>>1);
-	} else {
+	if (ei_local->word16)
+		writesw(nic_base + NE_DATAPORT, buf, count >> 1);
+	else
 		writesb(nic_base + NE_DATAPORT, buf, count);
-	}
 
 	dma_start = jiffies;
 
 	while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
-		if (jiffies - dma_start > 2*HZ/100) {		/* 20ms */
-			dev_warn(&ax->dev->dev,
-				 "%s: timeout waiting for Tx RDC.\n", dev->name);
+		if (jiffies - dma_start > 2 * HZ / 100) {		/* 20ms */
+			netdev_warn(dev, "timeout waiting for Tx RDC.\n");
 			ax_reset_8390(dev);
-			ax_NS8390_init(dev,1);
+			ax_NS8390_init(dev, 1);
 			break;
 		}
 	}
 
 	ei_outb(ENISR_RDC, nic_base + EN0_ISR);	/* Ack intr. */
-	ei_status.dmaing &= ~0x01;
+	ei_local->dmaing &= ~0x01;
 }
 
 /* definitions for accessing MII/EEPROM interface */
 
 #define AX_MEMR			EI_SHIFT(0x14)
-#define AX_MEMR_MDC		(1<<0)
-#define AX_MEMR_MDIR		(1<<1)
-#define AX_MEMR_MDI		(1<<2)
-#define AX_MEMR_MDO		(1<<3)
-#define AX_MEMR_EECS		(1<<4)
-#define AX_MEMR_EEI		(1<<5)
-#define AX_MEMR_EEO		(1<<6)
-#define AX_MEMR_EECLK		(1<<7)
+#define AX_MEMR_MDC		BIT(0)
+#define AX_MEMR_MDIR		BIT(1)
+#define AX_MEMR_MDI		BIT(2)
+#define AX_MEMR_MDO		BIT(3)
+#define AX_MEMR_EECS		BIT(4)
+#define AX_MEMR_EEI		BIT(5)
+#define AX_MEMR_EEO		BIT(6)
+#define AX_MEMR_EECLK		BIT(7)
 
-/* ax_mii_ei_outbits
- *
- * write the specified set of bits to the phy
-*/
-
-static void
-ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
+static void ax_handle_link_change(struct net_device *dev)
 {
-	struct ei_device *ei_local = netdev_priv(dev);
-	void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
-	unsigned int memr;
-
-	/* clock low, data to output mode */
-	memr = ei_inb(memr_addr);
-	memr &= ~(AX_MEMR_MDC | AX_MEMR_MDIR);
-	ei_outb(memr, memr_addr);
-
-	for (len--; len >= 0; len--) {
-		if (bits & (1 << len))
-			memr |= AX_MEMR_MDO;
-		else
-			memr &= ~AX_MEMR_MDO;
-
-		ei_outb(memr, memr_addr);
-
-		/* clock high */
-
-		ei_outb(memr | AX_MEMR_MDC, memr_addr);
-		udelay(1);
-
-		/* clock low */
-		ei_outb(memr, memr_addr);
-	}
-
-	/* leaves the clock line low, mdir input */
-	memr |= AX_MEMR_MDIR;
-	ei_outb(memr, (void __iomem *)dev->base_addr + AX_MEMR);
-}
-
-/* ax_phy_ei_inbits
- *
- * read a specified number of bits from the phy
-*/
-
-static unsigned int
-ax_phy_ei_inbits(struct net_device *dev, int no)
-{
-	struct ei_device *ei_local = netdev_priv(dev);
-	void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
-	unsigned int memr;
-	unsigned int result = 0;
-
-	/* clock low, data to input mode */
-	memr = ei_inb(memr_addr);
-	memr &= ~AX_MEMR_MDC;
-	memr |= AX_MEMR_MDIR;
-	ei_outb(memr, memr_addr);
-
-	for (no--; no >= 0; no--) {
-		ei_outb(memr | AX_MEMR_MDC, memr_addr);
-
-		udelay(1);
-
-		if (ei_inb(memr_addr) & AX_MEMR_MDI)
-			result |= (1<<no);
-
-		ei_outb(memr, memr_addr);
-	}
-
-	return result;
-}
-
-/* ax_phy_issueaddr
- *
- * use the low level bit shifting routines to send the address
- * and command to the specified phy
-*/
-
-static void
-ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
-{
-	if (phy_debug)
-		pr_debug("%s: dev %p, %04x, %04x, %d\n",
-			__func__, dev, phy_addr, reg, opc);
-
-	ax_mii_ei_outbits(dev, 0x3f, 6);	/* pre-amble */
-	ax_mii_ei_outbits(dev, 1, 2);		/* frame-start */
-	ax_mii_ei_outbits(dev, opc, 2);		/* op code */
-	ax_mii_ei_outbits(dev, phy_addr, 5);	/* phy address */
-	ax_mii_ei_outbits(dev, reg, 5);		/* reg address */
-}
-
-static int
-ax_phy_read(struct net_device *dev, int phy_addr, int reg)
-{
-	struct ei_device *ei_local = netdev_priv(dev);
-	unsigned long flags;
- 	unsigned int result;
-
-      	spin_lock_irqsave(&ei_local->page_lock, flags);
-
-	ax_phy_issueaddr(dev, phy_addr, reg, 2);
-
-	result = ax_phy_ei_inbits(dev, 17);
-	result &= ~(3<<16);
-
-      	spin_unlock_irqrestore(&ei_local->page_lock, flags);
-
-	if (phy_debug)
-		pr_debug("%s: %04x.%04x => read %04x\n", __func__,
-			 phy_addr, reg, result);
-
-	return result;
-}
-
-static void
-ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
-{
-	struct ei_device *ei = netdev_priv(dev);
 	struct ax_device  *ax = to_ax_dev(dev);
-	unsigned long flags;
+	struct phy_device *phy_dev = ax->phy_dev;
+	int status_change = 0;
 
-	dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n",
-		__func__, dev, phy_addr, reg, value);
+	if (phy_dev->link && ((ax->speed != phy_dev->speed) ||
+			     (ax->duplex != phy_dev->duplex))) {
 
-      	spin_lock_irqsave(&ei->page_lock, flags);
+		ax->speed = phy_dev->speed;
+		ax->duplex = phy_dev->duplex;
+		status_change = 1;
+	}
 
-	ax_phy_issueaddr(dev, phy_addr, reg, 1);
-	ax_mii_ei_outbits(dev, 2, 2);		/* send TA */
-	ax_mii_ei_outbits(dev, value, 16);
+	if (phy_dev->link != ax->link) {
+		if (!phy_dev->link) {
+			ax->speed = 0;
+			ax->duplex = -1;
+		}
+		ax->link = phy_dev->link;
 
-      	spin_unlock_irqrestore(&ei->page_lock, flags);
+		status_change = 1;
+	}
+
+	if (status_change)
+		phy_print_status(phy_dev);
 }
 
-static void ax_mii_expiry(unsigned long data)
+static int ax_mii_probe(struct net_device *dev)
 {
-	struct net_device *dev = (struct net_device *)data;
 	struct ax_device  *ax = to_ax_dev(dev);
-	unsigned long flags;
+	struct phy_device *phy_dev = NULL;
+	int ret;
 
-	spin_lock_irqsave(&ax->mii_lock, flags);
-	mii_check_media(&ax->mii, netif_msg_link(ax), 0);
-	spin_unlock_irqrestore(&ax->mii_lock, flags);
-
-	if (ax->running) {
-		ax->mii_timer.expires = jiffies + HZ*2;
-		add_timer(&ax->mii_timer);
+	/* find the first phy */
+	phy_dev = phy_find_first(ax->mii_bus);
+	if (!phy_dev) {
+		netdev_err(dev, "no PHY found\n");
+		return -ENODEV;
 	}
+
+	ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0,
+				 PHY_INTERFACE_MODE_MII);
+	if (ret) {
+		netdev_err(dev, "Could not attach to PHY\n");
+		return ret;
+	}
+
+	/* mask with MAC supported features */
+	phy_dev->supported &= PHY_BASIC_FEATURES;
+	phy_dev->advertising = phy_dev->supported;
+
+	ax->phy_dev = phy_dev;
+
+	netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+		    phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq);
+
+	return 0;
+}
+
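+/* Power the PHY up or down via the AX_GPOC_PPDSET bit of the GPOC register. */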
+static void ax_phy_switch(struct net_device *dev, int on)
+{
+	struct ei_device *ei_local = netdev_priv(dev);
+	struct ax_device *ax = to_ax_dev(dev);
+
+	u8 reg_gpoc = ax->plat->gpoc_val;
+
+	if (!!on)
+		reg_gpoc &= ~AX_GPOC_PPDSET;
+	else
+		reg_gpoc |= AX_GPOC_PPDSET;
+
+	ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17));
 }
 
 static int ax_open(struct net_device *dev)
 {
-	struct ax_device  *ax = to_ax_dev(dev);
-	struct ei_device *ei_local = netdev_priv(dev);
+	struct ax_device *ax = to_ax_dev(dev);
 	int ret;
 
-	dev_dbg(&ax->dev->dev, "%s: open\n", dev->name);
+	netdev_dbg(dev, "open\n");
 
 	ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags,
 			  dev->name, dev);
 	if (ret)
-		return ret;
-
-	ret = ax_ei_open(dev);
-	if (ret) {
-		free_irq(dev->irq, dev);
-		return ret;
-	}
+		goto failed_request_irq;
 
 	/* turn the phy on (if turned off) */
+	ax_phy_switch(dev, 1);
 
-	ei_outb(ax->plat->gpoc_val, ei_local->mem + EI_SHIFT(0x17));
+	ret = ax_mii_probe(dev);
+	if (ret)
+		goto failed_mii_probe;
+	phy_start(ax->phy_dev);
+
+	ret = ax_ei_open(dev);
+	if (ret)
+		goto failed_ax_ei_open;
+
 	ax->running = 1;
 
-	/* start the MII timer */
-
-	init_timer(&ax->mii_timer);
-
-	ax->mii_timer.expires  = jiffies+1;
-	ax->mii_timer.data     = (unsigned long) dev;
-	ax->mii_timer.function = ax_mii_expiry;
-
-	add_timer(&ax->mii_timer);
-
 	return 0;
+
+ failed_ax_ei_open:
+	phy_disconnect(ax->phy_dev);
+ failed_mii_probe:
+	ax_phy_switch(dev, 0);
+	free_irq(dev->irq, dev);
+ failed_request_irq:
+	return ret;
 }
 
 static int ax_close(struct net_device *dev)
 {
 	struct ax_device *ax = to_ax_dev(dev);
-	struct ei_device *ei_local = netdev_priv(dev);
 
-	dev_dbg(&ax->dev->dev, "%s: close\n", dev->name);
-
-	/* turn the phy off */
-
-	ei_outb(ax->plat->gpoc_val | (1<<6),
-	       ei_local->mem + EI_SHIFT(0x17));
+	netdev_dbg(dev, "close\n");
 
 	ax->running = 0;
 	wmb();
 
-	del_timer_sync(&ax->mii_timer);
 	ax_ei_close(dev);
 
+	/* turn the phy off */
+	ax_phy_switch(dev, 0);
+	phy_disconnect(ax->phy_dev);
+
 	free_irq(dev->irq, dev);
 	return 0;
 }
@@ -529,17 +452,15 @@
 static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 {
 	struct ax_device *ax = to_ax_dev(dev);
-	unsigned long flags;
-	int rc;
+	struct phy_device *phy_dev = ax->phy_dev;
 
 	if (!netif_running(dev))
 		return -EINVAL;
 
-	spin_lock_irqsave(&ax->mii_lock, flags);
-	rc = generic_mii_ioctl(&ax->mii, if_mii(req), cmd, NULL);
-	spin_unlock_irqrestore(&ax->mii_lock, flags);
+	if (!phy_dev)
+		return -ENODEV;
 
-	return rc;
+	return phy_mii_ioctl(phy_dev, req, cmd);
 }
 
 /* ethtool ops */
@@ -547,56 +468,40 @@
 static void ax_get_drvinfo(struct net_device *dev,
 			   struct ethtool_drvinfo *info)
 {
-	struct ax_device *ax = to_ax_dev(dev);
+	struct platform_device *pdev = to_platform_device(dev->dev.parent);
 
 	strcpy(info->driver, DRV_NAME);
 	strcpy(info->version, DRV_VERSION);
-	strcpy(info->bus_info, ax->dev->name);
+	strcpy(info->bus_info, pdev->name);
 }
 
 static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct ax_device *ax = to_ax_dev(dev);
-	unsigned long flags;
+	struct phy_device *phy_dev = ax->phy_dev;
 
-	spin_lock_irqsave(&ax->mii_lock, flags);
-	mii_ethtool_gset(&ax->mii, cmd);
-	spin_unlock_irqrestore(&ax->mii_lock, flags);
+	if (!phy_dev)
+		return -ENODEV;
 
-	return 0;
+	return phy_ethtool_gset(phy_dev, cmd);
 }
 
 static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct ax_device *ax = to_ax_dev(dev);
-	unsigned long flags;
-	int rc;
+	struct phy_device *phy_dev = ax->phy_dev;
 
-	spin_lock_irqsave(&ax->mii_lock, flags);
-	rc = mii_ethtool_sset(&ax->mii, cmd);
-	spin_unlock_irqrestore(&ax->mii_lock, flags);
+	if (!phy_dev)
+		return -ENODEV;
 
-	return rc;
-}
-
-static int ax_nway_reset(struct net_device *dev)
-{
-	struct ax_device *ax = to_ax_dev(dev);
-	return mii_nway_restart(&ax->mii);
-}
-
-static u32 ax_get_link(struct net_device *dev)
-{
-	struct ax_device *ax = to_ax_dev(dev);
-	return mii_link_ok(&ax->mii);
+	return phy_ethtool_sset(phy_dev, cmd);
 }
 
 static const struct ethtool_ops ax_ethtool_ops = {
 	.get_drvinfo		= ax_get_drvinfo,
 	.get_settings		= ax_get_settings,
 	.set_settings		= ax_set_settings,
-	.nway_reset		= ax_nway_reset,
-	.get_link		= ax_get_link,
+	.get_link		= ethtool_op_get_link,
 };
 
 #ifdef CONFIG_AX88796_93CX6
@@ -640,37 +545,131 @@
 	.ndo_get_stats		= ax_ei_get_stats,
 	.ndo_set_multicast_list = ax_ei_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= ax_ei_poll,
 #endif
 };
 
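+/*
+ * Bit-banged MDIO accessors for the generic mdio-bitbang framework:
+ * each callback toggles or samples a MEMR register bit (MDC clock,
+ * MDIR direction, MDO data out, MDI data in) to talk to the PHY.
+ */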
+static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+	struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+	if (level)
+		ax->reg_memr |= AX_MEMR_MDC;
+	else
+		ax->reg_memr &= ~AX_MEMR_MDC;
+
+	ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+	struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+	if (output)
+		ax->reg_memr &= ~AX_MEMR_MDIR;
+	else
+		ax->reg_memr |= AX_MEMR_MDIR;
+
+	ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value)
+{
+	struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+
+	if (value)
+		ax->reg_memr |= AX_MEMR_MDO;
+	else
+		ax->reg_memr &= ~AX_MEMR_MDO;
+
+	ei_outb(ax->reg_memr, ax->addr_memr);
+}
+
+static int ax_bb_get_data(struct mdiobb_ctrl *ctrl)
+{
+	struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl);
+	int reg_memr = ei_inb(ax->addr_memr);
+
+	return reg_memr & AX_MEMR_MDI ? 1 : 0;
+}
+
+static struct mdiobb_ops bb_ops = {
+	.owner = THIS_MODULE,
+	.set_mdc = ax_bb_mdc,
+	.set_mdio_dir = ax_bb_dir,
+	.set_mdio_data = ax_bb_set_data,
+	.get_mdio_data = ax_bb_get_data,
+};
+
 /* setup code */
 
+static int ax_mii_init(struct net_device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev->dev.parent);
+	struct ei_device *ei_local = netdev_priv(dev);
+	struct ax_device *ax = to_ax_dev(dev);
+	int err, i;
+
+	ax->bb_ctrl.ops = &bb_ops;
+	ax->addr_memr = ei_local->mem + AX_MEMR;
+	ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl);
+	if (!ax->mii_bus) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	ax->mii_bus->name = "ax88796_mii_bus";
+	ax->mii_bus->parent = dev->dev.parent;
+	snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+
+	ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	if (!ax->mii_bus->irq) {
+		err = -ENOMEM;
+		goto out_free_mdio_bitbang;
+	}
+
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		ax->mii_bus->irq[i] = PHY_POLL;
+
+	err = mdiobus_register(ax->mii_bus);
+	if (err)
+		goto out_free_irq;
+
+	return 0;
+
+ out_free_irq:
+	kfree(ax->mii_bus->irq);
+ out_free_mdio_bitbang:
+	free_mdio_bitbang(ax->mii_bus);
+ out:
+	return err;
+}
+
 static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local)
 {
 	void __iomem *ioaddr = ei_local->mem;
 	struct ax_device *ax = to_ax_dev(dev);
 
-	/* Select page 0*/
-	ei_outb(E8390_NODMA+E8390_PAGE0+E8390_STOP, ioaddr + E8390_CMD);
+	/* Select page 0 */
+	ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, ioaddr + E8390_CMD);
 
 	/* set to byte access */
 	ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG);
 	ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17));
 }
 
-/* ax_init_dev
+/*
+ * ax_init_dev
  *
  * initialise the specified device, taking care to note the MAC
  * address it may already have (if configured), ensure
 * the device is ready to be used by lib8390.c and registered with
  * the network layer.
  */
-
-static int ax_init_dev(struct net_device *dev, int first_init)
+static int ax_init_dev(struct net_device *dev)
 {
 	struct ei_device *ei_local = netdev_priv(dev);
 	struct ax_device *ax = to_ax_dev(dev);
@@ -690,23 +689,23 @@
 
 	/* read the mac from the card prom if we need it */
 
-	if (first_init && ax->plat->flags & AXFLG_HAS_EEPROM) {
+	if (ax->plat->flags & AXFLG_HAS_EEPROM) {
 		unsigned char SA_prom[32];
 
-		for(i = 0; i < sizeof(SA_prom); i+=2) {
+		for (i = 0; i < sizeof(SA_prom); i += 2) {
 			SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT);
-			SA_prom[i+1] = ei_inb(ioaddr + NE_DATAPORT);
+			SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT);
 		}
 
 		if (ax->plat->wordlength == 2)
 			for (i = 0; i < 16; i++)
 				SA_prom[i] = SA_prom[i+i];
 
-		memcpy(dev->dev_addr,  SA_prom, 6);
+		memcpy(dev->dev_addr, SA_prom, 6);
 	}
 
 #ifdef CONFIG_AX88796_93CX6
-	if (first_init && ax->plat->flags & AXFLG_HAS_93CX6) {
+	if (ax->plat->flags & AXFLG_HAS_93CX6) {
 		unsigned char mac_addr[6];
 		struct eeprom_93cx6 eeprom;
 
@@ -719,7 +718,7 @@
 				       (__le16 __force *)mac_addr,
 				       sizeof(mac_addr) >> 1);
 
-		memcpy(dev->dev_addr,  mac_addr, 6);
+		memcpy(dev->dev_addr, mac_addr, 6);
 	}
 #endif
 	if (ax->plat->wordlength == 2) {
@@ -732,67 +731,56 @@
 		stop_page = NE1SM_STOP_PG;
 	}
 
-	/* load the mac-address from the device if this is the
-	 * first time we've initialised */
-
-	if (first_init) {
-		if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
-			ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
-				ei_local->mem + E8390_CMD); /* 0x61 */
-			for (i = 0; i < ETHER_ADDR_LEN; i++)
-				dev->dev_addr[i] =
-					ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
-		}
-
-		if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
-		     ax->plat->mac_addr)
-			memcpy(dev->dev_addr, ax->plat->mac_addr,
-				ETHER_ADDR_LEN);
+	/* load the mac-address from the device */
+	if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
+		ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
+			ei_local->mem + E8390_CMD); /* 0x61 */
+		for (i = 0; i < ETHER_ADDR_LEN; i++)
+			dev->dev_addr[i] =
+				ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
 	}
 
+	if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
+	    ax->plat->mac_addr)
+		memcpy(dev->dev_addr, ax->plat->mac_addr,
+		       ETHER_ADDR_LEN);
+
 	ax_reset_8390(dev);
 
-	ei_status.name = "AX88796";
-	ei_status.tx_start_page = start_page;
-	ei_status.stop_page = stop_page;
-	ei_status.word16 = (ax->plat->wordlength == 2);
-	ei_status.rx_start_page = start_page + TX_PAGES;
+	ei_local->name = "AX88796";
+	ei_local->tx_start_page = start_page;
+	ei_local->stop_page = stop_page;
+	ei_local->word16 = (ax->plat->wordlength == 2);
+	ei_local->rx_start_page = start_page + TX_PAGES;
 
 #ifdef PACKETBUF_MEMSIZE
-	 /* Allow the packet buffer size to be overridden by know-it-alls. */
-	ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+	/* Allow the packet buffer size to be overridden by know-it-alls. */
+	ei_local->stop_page = ei_local->tx_start_page + PACKETBUF_MEMSIZE;
 #endif
 
-	ei_status.reset_8390	= &ax_reset_8390;
-	ei_status.block_input	= &ax_block_input;
-	ei_status.block_output	= &ax_block_output;
-	ei_status.get_8390_hdr	= &ax_get_8390_hdr;
-	ei_status.priv = 0;
+	ei_local->reset_8390 = &ax_reset_8390;
+	ei_local->block_input = &ax_block_input;
+	ei_local->block_output = &ax_block_output;
+	ei_local->get_8390_hdr = &ax_get_8390_hdr;
+	ei_local->priv = 0;
 
-	dev->netdev_ops		= &ax_netdev_ops;
-	dev->ethtool_ops	= &ax_ethtool_ops;
+	dev->netdev_ops = &ax_netdev_ops;
+	dev->ethtool_ops = &ax_ethtool_ops;
 
-	ax->msg_enable		= NETIF_MSG_LINK;
-	ax->mii.phy_id_mask	= 0x1f;
-	ax->mii.reg_num_mask	= 0x1f;
-	ax->mii.phy_id		= 0x10;		/* onboard phy */
-	ax->mii.force_media	= 0;
-	ax->mii.full_duplex	= 0;
-	ax->mii.mdio_read	= ax_phy_read;
-	ax->mii.mdio_write	= ax_phy_write;
-	ax->mii.dev		= dev;
+	ret = ax_mii_init(dev);
+	if (ret)
+		goto out_irq;
 
 	ax_NS8390_init(dev, 0);
 
-	if (first_init)
-		dev_info(&ax->dev->dev, "%dbit, irq %d, %lx, MAC: %pM\n",
-			 ei_status.word16 ? 16:8, dev->irq, dev->base_addr,
-			 dev->dev_addr);
-
 	ret = register_netdev(dev);
 	if (ret)
 		goto out_irq;
 
+	netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n",
+		    ei_local->word16 ? 16 : 8, dev->irq, dev->base_addr,
+		    dev->dev_addr);
+
 	return 0;
 
  out_irq:
@@ -802,24 +790,24 @@
 	return ret;
 }
 
-static int ax_remove(struct platform_device *_dev)
+static int ax_remove(struct platform_device *pdev)
 {
-	struct net_device *dev = platform_get_drvdata(_dev);
-	struct ax_device  *ax;
-
-	ax = to_ax_dev(dev);
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct ei_device *ei_local = netdev_priv(dev);
+	struct ax_device *ax = to_ax_dev(dev);
+	struct resource *mem;
 
 	unregister_netdev(dev);
 	free_irq(dev->irq, dev);
 
-	iounmap(ei_status.mem);
-	release_resource(ax->mem);
-	kfree(ax->mem);
+	iounmap(ei_local->mem);
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(mem->start, resource_size(mem));
 
 	if (ax->map2) {
 		iounmap(ax->map2);
-		release_resource(ax->mem2);
-		kfree(ax->mem2);
+		mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		release_mem_region(mem->start, resource_size(mem));
 	}
 
 	free_netdev(dev);
@@ -827,19 +815,20 @@
 	return 0;
 }
 
-/* ax_probe
+/*
+ * ax_probe
  *
 * This is the entry point that the platform device system uses to
- * notify us of a new device to attach to. Allocate memory, find
- * the resources and information passed, and map the necessary registers.
-*/
-
+ * notify us of a new device to attach to. Allocate memory, find the
+ * resources and information passed, and map the necessary registers.
+ */
 static int ax_probe(struct platform_device *pdev)
 {
 	struct net_device *dev;
-	struct ax_device  *ax;
-	struct resource   *res;
-	size_t size;
+	struct ei_device *ei_local;
+	struct ax_device *ax;
+	struct resource *irq, *mem, *mem2;
+	resource_size_t mem_size, mem2_size = 0;
 	int ret = 0;
 
 	dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
@@ -847,120 +836,107 @@
 		return -ENOMEM;
 
 	/* ok, let's set up our device */
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	ei_local = netdev_priv(dev);
 	ax = to_ax_dev(dev);
 
-	memset(ax, 0, sizeof(struct ax_device));
-
-	spin_lock_init(&ax->mii_lock);
-
-	ax->dev = pdev;
 	ax->plat = pdev->dev.platform_data;
 	platform_set_drvdata(pdev, dev);
 
-	ei_status.rxcr_base  = ax->plat->rcr_val;
+	ei_local->rxcr_base = ax->plat->rcr_val;
 
 	/* find the platform resources */
-
-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (res == NULL) {
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq) {
 		dev_err(&pdev->dev, "no IRQ specified\n");
 		ret = -ENXIO;
 		goto exit_mem;
 	}
 
-	dev->irq = res->start;
-	ax->irqflags = res->flags & IRQF_TRIGGER_MASK;
+	dev->irq = irq->start;
+	ax->irqflags = irq->flags & IRQF_TRIGGER_MASK;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res == NULL) {
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
 		dev_err(&pdev->dev, "no MEM specified\n");
 		ret = -ENXIO;
 		goto exit_mem;
 	}
 
-	size = (res->end - res->start) + 1;
+	mem_size = resource_size(mem);
 
-	/* setup the register offsets from either the platform data
-	 * or by using the size of the resource provided */
-
+	/*
+	 * setup the register offsets from either the platform data or
+	 * by using the size of the resource provided
+	 */
 	if (ax->plat->reg_offsets)
-		ei_status.reg_offset = ax->plat->reg_offsets;
+		ei_local->reg_offset = ax->plat->reg_offsets;
 	else {
-		ei_status.reg_offset = ax->reg_offsets;
+		ei_local->reg_offset = ax->reg_offsets;
 		for (ret = 0; ret < 0x18; ret++)
-			ax->reg_offsets[ret] = (size / 0x18) * ret;
+			ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
 	}
 
-	ax->mem = request_mem_region(res->start, size, pdev->name);
-	if (ax->mem == NULL) {
+	if (!request_mem_region(mem->start, mem_size, pdev->name)) {
 		dev_err(&pdev->dev, "cannot reserve registers\n");
- 		ret = -ENXIO;
+		ret = -ENXIO;
 		goto exit_mem;
 	}
 
-	ei_status.mem = ioremap(res->start, size);
-	dev->base_addr = (unsigned long)ei_status.mem;
+	ei_local->mem = ioremap(mem->start, mem_size);
+	dev->base_addr = (unsigned long)ei_local->mem;
 
-	if (ei_status.mem == NULL) {
-		dev_err(&pdev->dev, "Cannot ioremap area (%08llx,%08llx)\n",
-			(unsigned long long)res->start,
-			(unsigned long long)res->end);
+	if (ei_local->mem == NULL) {
+		dev_err(&pdev->dev, "Cannot ioremap area %pR\n", mem);
 
- 		ret = -ENXIO;
+		ret = -ENXIO;
 		goto exit_req;
 	}
 
 	/* look for reset area */
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (res == NULL) {
+	mem2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!mem2) {
 		if (!ax->plat->reg_offsets) {
 			for (ret = 0; ret < 0x20; ret++)
-				ax->reg_offsets[ret] = (size / 0x20) * ret;
+				ax->reg_offsets[ret] = (mem_size / 0x20) * ret;
 		}
-
-		ax->map2 = NULL;
 	} else {
- 		size = (res->end - res->start) + 1;
+		mem2_size = resource_size(mem2);
 
-		ax->mem2 = request_mem_region(res->start, size, pdev->name);
-		if (ax->mem2 == NULL) {
+		if (!request_mem_region(mem2->start, mem2_size, pdev->name)) {
 			dev_err(&pdev->dev, "cannot reserve registers\n");
 			ret = -ENXIO;
 			goto exit_mem1;
 		}
 
-		ax->map2 = ioremap(res->start, size);
-		if (ax->map2 == NULL) {
+		ax->map2 = ioremap(mem2->start, mem2_size);
+		if (!ax->map2) {
 			dev_err(&pdev->dev, "cannot map reset register\n");
 			ret = -ENXIO;
 			goto exit_mem2;
 		}
 
-		ei_status.reg_offset[0x1f] = ax->map2 - ei_status.mem;
+		ei_local->reg_offset[0x1f] = ax->map2 - ei_local->mem;
 	}
 
 	/* got resources, now initialise and register device */
-
-	ret = ax_init_dev(dev, 1);
+	ret = ax_init_dev(dev);
 	if (!ret)
 		return 0;
 
-	if (ax->map2 == NULL)
+	if (!ax->map2)
 		goto exit_mem1;
 
 	iounmap(ax->map2);
 
  exit_mem2:
-	release_resource(ax->mem2);
-	kfree(ax->mem2);
+	release_mem_region(mem2->start, mem2_size);
 
  exit_mem1:
-	iounmap(ei_status.mem);
+	iounmap(ei_local->mem);
 
  exit_req:
-	release_resource(ax->mem);
-	kfree(ax->mem);
+	release_mem_region(mem->start, mem_size);
 
  exit_mem:
 	free_netdev(dev);
@@ -974,7 +950,7 @@
 static int ax_suspend(struct platform_device *dev, pm_message_t state)
 {
 	struct net_device *ndev = platform_get_drvdata(dev);
-	struct ax_device  *ax = to_ax_dev(ndev);
+	struct ax_device *ax = to_ax_dev(ndev);
 
 	ax->resume_open = ax->running;
 
@@ -987,7 +963,7 @@
 static int ax_resume(struct platform_device *pdev)
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
-	struct ax_device  *ax = to_ax_dev(ndev);
+	struct ax_device *ax = to_ax_dev(ndev);
 
 	ax_initial_setup(ndev, netdev_priv(ndev));
 	ax_NS8390_init(ndev, ax->resume_open);
@@ -1001,7 +977,7 @@
 
 #else
 #define ax_suspend NULL
-#define ax_resume  NULL
+#define ax_resume NULL
 #endif
 
 static struct platform_driver axdrv = {
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index add0b93..ed709a5 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -225,6 +225,10 @@
 	u32 cache_line_barrier[15];
 };
 
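+/* Statistics maintained by the driver itself (exposed through ethtool). */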
+struct be_drv_stats {
+	u8 be_on_die_temperature;
+};
+
 struct be_vf_cfg {
 	unsigned char vf_mac_addr[ETH_ALEN];
 	u32 vf_if_handle;
@@ -234,6 +238,7 @@
 };
 
 #define BE_INVALID_PMAC_ID		0xffffffff
+
 struct be_adapter {
 	struct pci_dev *pdev;
 	struct net_device *netdev;
@@ -269,6 +274,7 @@
 	u32 big_page_size;	/* Compounded page size shared by rx wrbs */
 
 	u8 msix_vec_next_idx;
+	struct be_drv_stats drv_stats;
 
 	struct vlan_group *vlan_grp;
 	u16 vlans_added;
@@ -281,6 +287,7 @@
 	struct be_dma_mem stats_cmd;
 	/* Work queue used to perform periodic tasks like getting statistics */
 	struct delayed_work work;
+	u16 work_counter;
 
 	/* Ethtool knobs and info */
 	bool rx_csum; 		/* BE card must perform rx-checksumming */
@@ -298,7 +305,7 @@
 	u32 rx_fc;		/* Rx flow control */
 	u32 tx_fc;		/* Tx flow control */
 	bool ue_detected;
-	bool stats_ioctl_sent;
+	bool stats_cmd_sent;
 	int link_speed;
 	u8 port_type;
 	u8 transceiver;
@@ -311,6 +318,8 @@
 	struct be_vf_cfg vf_cfg[BE_MAX_VF];
 	u8 is_virtfn;
 	u32 sli_family;
+	u8 hba_port_num;
+	u16 pvid;
 };
 
 #define be_physfn(adapter) (!adapter->is_virtfn)
@@ -450,9 +459,8 @@
 	mac[5] = (u8)(addr & 0xFF);
 	mac[4] = (u8)((addr >> 8) & 0xFF);
 	mac[3] = (u8)((addr >> 16) & 0xFF);
-	mac[2] = 0xC9;
-	mac[1] = 0x00;
-	mac[0] = 0x00;
+	/* Use the OUI from the current MAC address */
+	memcpy(mac, adapter->netdev->dev_addr, 3);
 }
 
 extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 0c7811f..1822ecd 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -18,11 +18,20 @@
 #include "be.h"
 #include "be_cmds.h"
 
+/* Must be a power of 2 or else MODULO will BUG_ON */
+static int be_get_temp_freq = 32;
+
 static void be_mcc_notify(struct be_adapter *adapter)
 {
 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
 	u32 val = 0;
 
+	if (adapter->eeh_err) {
+		dev_info(&adapter->pdev->dev,
+			"Error in card detected! Cannot issue commands\n");
+		return;
+	}
+
 	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
 	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
 
@@ -75,7 +84,7 @@
 			be_dws_le_to_cpu(&resp->hw_stats,
 						sizeof(resp->hw_stats));
 			netdev_stats_update(adapter);
-			adapter->stats_ioctl_sent = false;
+			adapter->stats_cmd_sent = false;
 		}
 	} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
 		   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
@@ -102,6 +111,7 @@
 {
 	if (evt->valid) {
 		adapter->vlan_prio_bmap = evt->available_priority_bmap;
+		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
 		adapter->recommended_prio =
 			evt->reco_default_priority << VLAN_PRIO_SHIFT;
 	}
@@ -117,6 +127,16 @@
 	}
 }
 
+/* Grp5 PVID evt */
+static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
+		struct be_async_event_grp5_pvid_state *evt)
+{
+	if (evt->enabled)
+		adapter->pvid = evt->tag;
+	else
+		adapter->pvid = 0;
+}
+
 static void be_async_grp5_evt_process(struct be_adapter *adapter,
 		u32 trailer, struct be_mcc_compl *evt)
 {
@@ -134,6 +154,10 @@
 		be_async_grp5_qos_speed_process(adapter,
 		(struct be_async_event_grp5_qos_link_speed *)evt);
 	break;
+	case ASYNC_EVENT_PVID_STATE:
+		be_async_grp5_pvid_state_process(adapter,
+		(struct be_async_event_grp5_pvid_state *)evt);
+	break;
 	default:
 		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
 		break;
@@ -216,6 +240,9 @@
 	int i, num, status = 0;
 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
+	if (adapter->eeh_err)
+		return -EIO;
+
 	for (i = 0; i < mcc_timeout; i++) {
 		num = be_process_mcc(adapter, &status);
 		if (num)
@@ -245,6 +272,12 @@
 	int msecs = 0;
 	u32 ready;
 
+	if (adapter->eeh_err) {
+		dev_err(&adapter->pdev->dev,
+			"Error detected in card. Cannot issue commands\n");
+		return -EIO;
+	}
+
 	do {
 		ready = ioread32(db);
 		if (ready == 0xffffffff) {
@@ -598,7 +631,7 @@
 
 /* Uses synchronous MCCQ */
 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
-		u32 if_id, u32 *pmac_id)
+		u32 if_id, u32 *pmac_id, u32 domain)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_pmac_add *req;
@@ -619,6 +652,7 @@
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
 
+	req->hdr.domain = domain;
 	req->if_id = cpu_to_le32(if_id);
 	memcpy(req->mac_address, mac_addr, ETH_ALEN);
 
@@ -634,7 +668,7 @@
 }
 
 /* Uses synchronous MCCQ */
-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_pmac_del *req;
@@ -655,6 +689,7 @@
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
 
+	req->hdr.domain = dom;
 	req->if_id = cpu_to_le32(if_id);
 	req->pmac_id = cpu_to_le32(pmac_id);
 
@@ -995,7 +1030,7 @@
 }
 
 /* Uses mbox */
-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
+int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
 {
 	struct be_mcc_wrb *wrb;
 	struct be_cmd_req_if_destroy *req;
@@ -1016,6 +1051,7 @@
 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
 
+	req->hdr.domain = domain;
 	req->interface_id = cpu_to_le32(interface_id);
 
 	status = be_mbox_notify_wait(adapter);
@@ -1036,6 +1072,9 @@
 	struct be_sge *sge;
 	int status = 0;
 
+	if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
+		be_cmd_get_die_temperature(adapter);
+
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
@@ -1056,7 +1095,7 @@
 	sge->len = cpu_to_le32(nonemb_cmd->size);
 
 	be_mcc_notify(adapter);
-	adapter->stats_ioctl_sent = true;
+	adapter->stats_cmd_sent = true;
 
 err:
 	spin_unlock_bh(&adapter->mcc_lock);
@@ -1103,6 +1142,44 @@
 	return status;
 }
 
+/* Uses synchronous mcc */
+int be_cmd_get_die_temperature(struct be_adapter *adapter)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_get_cntl_addnl_attribs *req;
+	int status;
+
+	spin_lock_bh(&adapter->mcc_lock);
+
+	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = embedded_payload(wrb);
+
+	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+			OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);
+
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+		OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));
+
+	status = be_mcc_notify_wait(adapter);
+	if (!status) {
+		struct be_cmd_resp_get_cntl_addnl_attribs *resp =
+						embedded_payload(wrb);
+		adapter->drv_stats.be_on_die_temperature =
+						resp->on_die_temperature;
+	}
+	/* If IOCTL fails once, do not bother issuing it again */
+	else
+		be_get_temp_freq = 0;
+
+err:
+	spin_unlock_bh(&adapter->mcc_lock);
+	return status;
+}
+
 /* Uses Mbox */
 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
 {
@@ -1786,6 +1863,10 @@
 	spin_lock_bh(&adapter->mcc_lock);
 
 	wrb = wrb_from_mccq(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
 	req = nonemb_cmd->va;
 	sge = nonembedded_sgl(wrb);
 
@@ -1801,6 +1882,7 @@
 
 	status = be_mcc_notify_wait(adapter);
 
+err:
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
@@ -1863,8 +1945,8 @@
 			OPCODE_COMMON_SET_QOS, sizeof(*req));
 
 	req->hdr.domain = domain;
-	req->valid_bits = BE_QOS_BITS_NIC;
-	req->max_bps_nic = bps;
+	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
+	req->max_bps_nic = cpu_to_le32(bps);
 
 	status = be_mcc_notify_wait(adapter);
 
@@ -1872,3 +1954,57 @@
 	spin_unlock_bh(&adapter->mcc_lock);
 	return status;
 }
+
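+/* Uses Mbox; reads the controller attributes to get the physical port number */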
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
+{
+	struct be_mcc_wrb *wrb;
+	struct be_cmd_req_cntl_attribs *req;
+	struct be_cmd_resp_cntl_attribs *resp;
+	struct be_sge *sge;
+	int status;
+	int payload_len = max(sizeof(*req), sizeof(*resp));
+	struct mgmt_controller_attrib *attribs;
+	struct be_dma_mem attribs_cmd;
+
+	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
+	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
+	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
+						&attribs_cmd.dma);
+	if (!attribs_cmd.va) {
+		dev_err(&adapter->pdev->dev,
+				"Memory allocation failure\n");
+		return -ENOMEM;
+	}
+
+	if (mutex_lock_interruptible(&adapter->mbox_lock))
+		return -1;
+
+	wrb = wrb_from_mbox(adapter);
+	if (!wrb) {
+		status = -EBUSY;
+		goto err;
+	}
+	req = attribs_cmd.va;
+	sge = nonembedded_sgl(wrb);
+
+	be_wrb_hdr_prepare(wrb, payload_len, false, 1,
+			OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
+	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+			 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
+	sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
+	sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
+	sge->len = cpu_to_le32(attribs_cmd.size);
+
+	status = be_mbox_notify_wait(adapter);
+	if (!status) {
+		attribs = (struct mgmt_controller_attrib *)(attribs_cmd.va +
+					sizeof(struct be_cmd_resp_hdr));
+		adapter->hba_port_num = attribs->hba_attribs.phy_port;
+	}
+
+err:
+	mutex_unlock(&adapter->mbox_lock);
+	pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
+					attribs_cmd.dma);
+	return status;
+}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 83d15c8..93e5768 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -88,6 +88,7 @@
 #define ASYNC_EVENT_CODE_GRP_5		0x5
 #define ASYNC_EVENT_QOS_SPEED		0x1
 #define ASYNC_EVENT_COS_PRIORITY	0x2
+#define ASYNC_EVENT_PVID_STATE		0x3
 struct be_async_event_trailer {
 	u32 code;
 };
@@ -134,6 +135,18 @@
 	struct be_async_event_trailer trailer;
 } __packed;
 
+/* When the event code of an async trailer is GRP5 and event type is
+ * PVID state, the mcc_compl must be interpreted as follows
+ */
+struct be_async_event_grp5_pvid_state {
+	u8 enabled;
+	u8 rsvd0;
+	u16 tag;
+	u32 event_tag;
+	u32 rsvd1;
+	struct be_async_event_trailer trailer;
+} __packed;
+
 struct be_mcc_mailbox {
 	struct be_mcc_wrb wrb;
 	struct be_mcc_compl compl;
@@ -156,6 +169,7 @@
 #define OPCODE_COMMON_SET_QOS				28
 #define OPCODE_COMMON_MCC_CREATE_EXT			90
 #define OPCODE_COMMON_SEEPROM_READ			30
+#define OPCODE_COMMON_GET_CNTL_ATTRIBUTES		32
 #define OPCODE_COMMON_NTWK_RX_FILTER    		34
 #define OPCODE_COMMON_GET_FW_VERSION			35
 #define OPCODE_COMMON_SET_FLOW_CONTROL			36
@@ -176,6 +190,7 @@
 #define OPCODE_COMMON_GET_BEACON_STATE			70
 #define OPCODE_COMMON_READ_TRANSRECV_DATA		73
 #define OPCODE_COMMON_GET_PHY_DETAILS			102
+#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES	121
 
 #define OPCODE_ETH_RSS_CONFIG				1
 #define OPCODE_ETH_ACPI_CONFIG				2
@@ -619,7 +634,10 @@
 	u32 rx_drops_invalid_ring;	/* dword 145*/
 	u32 forwarded_packets;	/* dword 146*/
 	u32 rx_drops_mtu;	/* dword 147*/
-	u32 rsvd0[15];
+	u32 rsvd0[7];
+	u32 port0_jabber_events;
+	u32 port1_jabber_events;
+	u32 rsvd1[6];
 };
 
 struct be_erx_stats {
@@ -630,11 +648,16 @@
 	u32 debug_pmem_pbuf_dealloc;       /* dword 47*/
 };
 
+struct be_pmem_stats {
+	u32 eth_red_drops;
+	u32 rsvd[4];
+};
+
 struct be_hw_stats {
 	struct be_rxf_stats rxf;
 	u32 rsvd[48];
 	struct be_erx_stats erx;
-	u32 rsvd1[6];
+	struct be_pmem_stats pmem;
 };
 
 struct be_cmd_req_get_stats {
@@ -647,6 +670,20 @@
 	struct be_hw_stats hw_stats;
 };
 
+struct be_cmd_req_get_cntl_addnl_attribs {
+	struct be_cmd_req_hdr hdr;
+	u8 rsvd[8];
+};
+
+struct be_cmd_resp_get_cntl_addnl_attribs {
+	struct be_cmd_resp_hdr hdr;
+	u16 ipl_file_number;
+	u8 ipl_file_version;
+	u8 rsvd0;
+	u8 on_die_temperature; /* in degrees centigrade */
+	u8 rsvd1[3];
+};
+
 struct be_cmd_req_vlan_config {
 	struct be_cmd_req_hdr hdr;
 	u8 interface_id;
@@ -994,17 +1031,29 @@
 	u32 rsvd;
 };
 
+/*********************** Controller Attributes ***********************/
+struct be_cmd_req_cntl_attribs {
+	struct be_cmd_req_hdr hdr;
+};
+
+struct be_cmd_resp_cntl_attribs {
+	struct be_cmd_resp_hdr hdr;
+	struct mgmt_controller_attrib attribs;
+};
+
 extern int be_pci_fnum_get(struct be_adapter *adapter);
 extern int be_cmd_POST(struct be_adapter *adapter);
 extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
 			u8 type, bool permanent, u32 if_handle);
 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
-			u32 if_id, u32 *pmac_id);
-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
+			u32 if_id, u32 *pmac_id, u32 domain);
+extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
+			u32 pmac_id, u32 domain);
 extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
 			u32 en_flags, u8 *mac, bool pmac_invalid,
 			u32 *if_handle, u32 *pmac_id, u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
+extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
+			u32 domain);
 extern int be_cmd_eq_create(struct be_adapter *adapter,
 			struct be_queue_info *eq, int eq_delay);
 extern int be_cmd_cq_create(struct be_adapter *adapter,
@@ -1076,4 +1125,6 @@
 		struct be_dma_mem *cmd);
 extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
 extern void be_detect_dump_ue(struct be_adapter *adapter);
+extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
+extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
 
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index b4be027..6e5e433 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -26,7 +26,8 @@
 	int offset;
 };
 
-enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT};
+enum {NETSTAT, PORTSTAT, MISCSTAT, DRVSTAT_TX, DRVSTAT_RX, ERXSTAT,
+			PMEMSTAT, DRVSTAT};
 #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
 					offsetof(_struct, field)
 #define NETSTAT_INFO(field) 	#field, NETSTAT,\
@@ -43,6 +44,11 @@
 						field)
 #define ERXSTAT_INFO(field) 	#field, ERXSTAT,\
 					FIELDINFO(struct be_erx_stats, field)
+#define PMEMSTAT_INFO(field) 	#field, PMEMSTAT,\
+					FIELDINFO(struct be_pmem_stats, field)
+#define	DRVSTAT_INFO(field)	#field, DRVSTAT,\
+					FIELDINFO(struct be_drv_stats, \
+						field)
 
 static const struct be_ethtool_stat et_stats[] = {
 	{NETSTAT_INFO(rx_packets)},
@@ -99,7 +105,11 @@
 	{MISCSTAT_INFO(rx_drops_too_many_frags)},
 	{MISCSTAT_INFO(rx_drops_invalid_ring)},
 	{MISCSTAT_INFO(forwarded_packets)},
-	{MISCSTAT_INFO(rx_drops_mtu)}
+	{MISCSTAT_INFO(rx_drops_mtu)},
+	{MISCSTAT_INFO(port0_jabber_events)},
+	{MISCSTAT_INFO(port1_jabber_events)},
+	{PMEMSTAT_INFO(eth_red_drops)},
+	{DRVSTAT_INFO(be_on_die_temperature)}
 };
 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
 
@@ -121,7 +131,7 @@
 	"MAC Loopback test",
 	"PHY Loopback test",
 	"External Loopback test",
-	"DDR DMA test"
+	"DDR DMA test",
 	"Link test"
 };
 
@@ -276,6 +286,12 @@
 		case MISCSTAT:
 			p = &hw_stats->rxf;
 			break;
+		case PMEMSTAT:
+			p = &hw_stats->pmem;
+			break;
+		case DRVSTAT:
+			p = &adapter->drv_stats;
+			break;
 		}
 
 		p = (u8 *)p + et_stats[i].offset;
@@ -376,8 +392,9 @@
 		}
 
 		phy_cmd.size = sizeof(struct be_cmd_req_get_phy_info);
-		phy_cmd.va = pci_alloc_consistent(adapter->pdev, phy_cmd.size,
-					&phy_cmd.dma);
+		phy_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
+						phy_cmd.size, &phy_cmd.dma,
+						GFP_KERNEL);
 		if (!phy_cmd.va) {
 			dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
 			return -ENOMEM;
@@ -416,8 +433,8 @@
 		adapter->port_type = ecmd->port;
 		adapter->transceiver = ecmd->transceiver;
 		adapter->autoneg = ecmd->autoneg;
-		pci_free_consistent(adapter->pdev, phy_cmd.size,
-					phy_cmd.va, phy_cmd.dma);
+		dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va,
+				  phy_cmd.dma);
 	} else {
 		ecmd->speed = adapter->link_speed;
 		ecmd->port = adapter->port_type;
@@ -496,7 +513,7 @@
 	int status;
 	u32 cur;
 
-	be_cmd_get_beacon_state(adapter, adapter->port_num, &cur);
+	be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
 
 	if (cur == BEACON_STATE_ENABLED)
 		return 0;
@@ -504,23 +521,34 @@
 	if (data < 2)
 		data = 2;
 
-	status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
+	status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
 			BEACON_STATE_ENABLED);
 	set_current_state(TASK_INTERRUPTIBLE);
 	schedule_timeout(data*HZ);
 
-	status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
+	status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
 			BEACON_STATE_DISABLED);
 
 	return status;
 }
 
+static bool
+be_is_wol_supported(struct be_adapter *adapter)
+{
+	if (!be_physfn(adapter))
+		return false;
+	else
+		return true;
+}
+
 static void
 be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 
-	wol->supported = WAKE_MAGIC;
+	if (be_is_wol_supported(adapter))
+		wol->supported = WAKE_MAGIC;
+
 	if (adapter->wol)
 		wol->wolopts = WAKE_MAGIC;
 	else
@@ -536,7 +564,7 @@
 	if (wol->wolopts & ~WAKE_MAGIC)
 		return -EINVAL;
 
-	if (wol->wolopts & WAKE_MAGIC)
+	if ((wol->wolopts & WAKE_MAGIC) && be_is_wol_supported(adapter))
 		adapter->wol = true;
 	else
 		adapter->wol = false;
@@ -554,8 +582,8 @@
 	};
 
 	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
-	ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
-					&ddrdma_cmd.dma);
+	ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
+					   &ddrdma_cmd.dma, GFP_KERNEL);
 	if (!ddrdma_cmd.va) {
 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
 		return -ENOMEM;
@@ -569,20 +597,20 @@
 	}
 
 err:
-	pci_free_consistent(adapter->pdev, ddrdma_cmd.size,
-			ddrdma_cmd.va, ddrdma_cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
+			  ddrdma_cmd.dma);
 	return ret;
 }
 
 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
 				u64 *status)
 {
-	be_cmd_set_loopback(adapter, adapter->port_num,
+	be_cmd_set_loopback(adapter, adapter->hba_port_num,
 				loopback_type, 1);
-	*status = be_cmd_loopback_test(adapter, adapter->port_num,
+	*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
 				loopback_type, 1500,
 				2, 0xabc);
-	be_cmd_set_loopback(adapter, adapter->port_num,
+	be_cmd_set_loopback(adapter, adapter->hba_port_num,
 				BE_NO_LOOPBACK, 1);
 	return *status;
 }
@@ -621,7 +649,8 @@
 				&qos_link_speed) != 0) {
 		test->flags |= ETH_TEST_FL_FAILED;
 		data[4] = -1;
-	} else if (mac_speed) {
+	} else if (!mac_speed) {
+		test->flags |= ETH_TEST_FL_FAILED;
 		data[4] = 1;
 	}
 }
@@ -662,8 +691,8 @@
 
 	memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
 	eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
-	eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
-				&eeprom_cmd.dma);
+	eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
+					   &eeprom_cmd.dma, GFP_KERNEL);
 
 	if (!eeprom_cmd.va) {
 		dev_err(&adapter->pdev->dev,
@@ -677,8 +706,8 @@
 		resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
 		memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
 	}
-	pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
-			eeprom_cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
+			  eeprom_cmd.dma);
 
 	return status;
 }
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 4096d97..3f459f7 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -327,6 +327,53 @@
 	u32 dw[4];
 };
 
+struct mgmt_hba_attribs {
+	u8 flashrom_version_string[32];
+	u8 manufacturer_name[32];
+	u32 supported_modes;
+	u32 rsvd0[3];
+	u8 ncsi_ver_string[12];
+	u32 default_extended_timeout;
+	u8 controller_model_number[32];
+	u8 controller_description[64];
+	u8 controller_serial_number[32];
+	u8 ip_version_string[32];
+	u8 firmware_version_string[32];
+	u8 bios_version_string[32];
+	u8 redboot_version_string[32];
+	u8 driver_version_string[32];
+	u8 fw_on_flash_version_string[32];
+	u32 functionalities_supported;
+	u16 max_cdblength;
+	u8 asic_revision;
+	u8 generational_guid[16];
+	u8 hba_port_count;
+	u16 default_link_down_timeout;
+	u8 iscsi_ver_min_max;
+	u8 multifunction_device;
+	u8 cache_valid;
+	u8 hba_status;
+	u8 max_domains_supported;
+	u8 phy_port;
+	u32 firmware_post_status;
+	u32 hba_mtu[8];
+	u32 rsvd1[4];
+};
+
+struct mgmt_controller_attrib {
+	struct mgmt_hba_attribs hba_attribs;
+	u16 pci_vendor_id;
+	u16 pci_device_id;
+	u16 pci_sub_vendor_id;
+	u16 pci_sub_system_id;
+	u8 pci_bus_number;
+	u8 pci_device_number;
+	u8 pci_function_number;
+	u8 interface_type;
+	u64 unique_identifier;
+	u32 rsvd0[5];
+};
+
 struct controller_id {
 	u32 vendor;
 	u32 device;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index de40d3b..0bdccb1 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -125,8 +125,8 @@
 {
 	struct be_dma_mem *mem = &q->dma_mem;
 	if (mem->va)
-		pci_free_consistent(adapter->pdev, mem->size,
-			mem->va, mem->dma);
+		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+				  mem->dma);
 }
 
 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
@@ -138,7 +138,8 @@
 	q->len = len;
 	q->entry_size = entry_size;
 	mem->size = len * entry_size;
-	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
+	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
+				     GFP_KERNEL);
 	if (!mem->va)
 		return -1;
 	memset(mem->va, 0, mem->size);
@@ -235,12 +236,13 @@
 	if (!be_physfn(adapter))
 		goto netdev_addr;
 
-	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
+	status = be_cmd_pmac_del(adapter, adapter->if_handle,
+				adapter->pmac_id, 0);
 	if (status)
 		return status;
 
 	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
-			adapter->if_handle, &adapter->pmac_id);
+				adapter->if_handle, &adapter->pmac_id, 0);
 netdev_addr:
 	if (!status)
 		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
@@ -312,11 +314,9 @@
 	if (adapter->link_up != link_up) {
 		adapter->link_speed = -1;
 		if (link_up) {
-			netif_start_queue(netdev);
 			netif_carrier_on(netdev);
 			printk(KERN_INFO "%s: Link up\n", netdev->name);
 		} else {
-			netif_stop_queue(netdev);
 			netif_carrier_off(netdev);
 			printk(KERN_INFO "%s: Link down\n", netdev->name);
 		}
@@ -486,7 +486,7 @@
 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
 }
 
-static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
+static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
 		bool unmap_single)
 {
 	dma_addr_t dma;
@@ -496,11 +496,10 @@
 	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
 	if (wrb->frag_len) {
 		if (unmap_single)
-			pci_unmap_single(pdev, dma, wrb->frag_len,
-				PCI_DMA_TODEVICE);
+			dma_unmap_single(dev, dma, wrb->frag_len,
+					 DMA_TO_DEVICE);
 		else
-			pci_unmap_page(pdev, dma, wrb->frag_len,
-				PCI_DMA_TODEVICE);
+			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
 	}
 }
 
@@ -509,7 +508,7 @@
 {
 	dma_addr_t busaddr;
 	int i, copied = 0;
-	struct pci_dev *pdev = adapter->pdev;
+	struct device *dev = &adapter->pdev->dev;
 	struct sk_buff *first_skb = skb;
 	struct be_queue_info *txq = &adapter->tx_obj.q;
 	struct be_eth_wrb *wrb;
@@ -523,9 +522,8 @@
 
 	if (skb->len > skb->data_len) {
 		int len = skb_headlen(skb);
-		busaddr = pci_map_single(pdev, skb->data, len,
-					 PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, busaddr))
+		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, busaddr))
 			goto dma_err;
 		map_single = true;
 		wrb = queue_head_node(txq);
@@ -538,10 +536,9 @@
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		struct skb_frag_struct *frag =
 			&skb_shinfo(skb)->frags[i];
-		busaddr = pci_map_page(pdev, frag->page,
-				       frag->page_offset,
-				       frag->size, PCI_DMA_TODEVICE);
-		if (pci_dma_mapping_error(pdev, busaddr))
+		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
+				       frag->size, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, busaddr))
 			goto dma_err;
 		wrb = queue_head_node(txq);
 		wrb_fill(wrb, busaddr, frag->size);
@@ -565,7 +562,7 @@
 	txq->head = map_head;
 	while (copied) {
 		wrb = queue_head_node(txq);
-		unmap_tx_frag(pdev, wrb, map_single);
+		unmap_tx_frag(dev, wrb, map_single);
 		map_single = false;
 		copied -= wrb->frag_len;
 		queue_head_inc(txq);
@@ -745,11 +742,11 @@
 	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
 		status = be_cmd_pmac_del(adapter,
 					adapter->vf_cfg[vf].vf_if_handle,
-					adapter->vf_cfg[vf].vf_pmac_id);
+					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
 
 	status = be_cmd_pmac_add(adapter, mac,
 				adapter->vf_cfg[vf].vf_if_handle,
-				&adapter->vf_cfg[vf].vf_pmac_id);
+				&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
 
 	if (status)
 		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
@@ -824,7 +821,7 @@
 		rate = 10000;
 
 	adapter->vf_cfg[vf].vf_tx_rate = rate;
-	status = be_cmd_set_qos(adapter, rate / 10, vf);
+	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
 
 	if (status)
 		dev_info(&adapter->pdev->dev,
@@ -890,8 +887,9 @@
 	BUG_ON(!rx_page_info->page);
 
 	if (rx_page_info->last_page_user) {
-		pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
-			adapter->big_page_size, PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&adapter->pdev->dev,
+			       dma_unmap_addr(rx_page_info, bus),
+			       adapter->big_page_size, DMA_FROM_DEVICE);
 		rx_page_info->last_page_user = false;
 	}
 
@@ -1049,6 +1047,9 @@
 	if ((adapter->function_mode & 0x400) && !vtm)
 		vlanf = 0;
 
+	if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
+		vlanf = 0;
+
 	if (unlikely(vlanf)) {
 		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
 			kfree_skb(skb);
@@ -1089,6 +1090,9 @@
 	if ((adapter->function_mode & 0x400) && !vtm)
 		vlanf = 0;
 
+	if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
+		vlanf = 0;
+
 	skb = napi_get_frags(&eq_obj->napi);
 	if (!skb) {
 		be_rx_compl_discard(adapter, rxo, rxcp);
@@ -1197,9 +1201,9 @@
 				rxo->stats.rx_post_fail++;
 				break;
 			}
-			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
-						adapter->big_page_size,
-						PCI_DMA_FROMDEVICE);
+			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
+						    0, adapter->big_page_size,
+						    DMA_FROM_DEVICE);
 			page_info->page_offset = 0;
 		} else {
 			get_page(pagep);
@@ -1272,8 +1276,8 @@
 	do {
 		cur_index = txq->tail;
 		wrb = queue_tail_node(txq);
-		unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
-					skb_headlen(sent_skb)));
+		unmap_tx_frag(&adapter->pdev->dev, wrb,
+			      (unmap_skb_hdr && skb_headlen(sent_skb)));
 		unmap_skb_hdr = false;
 
 		num_wrbs++;
@@ -1829,6 +1833,7 @@
 
 	if (ue_status_lo || ue_status_hi) {
 		adapter->ue_detected = true;
+		adapter->eeh_err = true;
 		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
 	}
 
@@ -1867,10 +1872,14 @@
 			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
 		}
+
+		if (!adapter->ue_detected && !lancer_chip(adapter))
+			be_detect_dump_ue(adapter);
+
 		goto reschedule;
 	}
 
-	if (!adapter->stats_ioctl_sent)
+	if (!adapter->stats_cmd_sent)
 		be_cmd_get_stats(adapter, &adapter->stats_cmd);
 
 	be_tx_rate_update(adapter);
@@ -2181,7 +2190,8 @@
 	memset(mac, 0, ETH_ALEN);
 
 	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
-	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				    GFP_KERNEL);
 	if (cmd.va == NULL)
 		return -1;
 	memset(cmd.va, 0, cmd.size);
@@ -2192,8 +2202,8 @@
 		if (status) {
 			dev_err(&adapter->pdev->dev,
 				"Could not enable Wake-on-lan\n");
-			pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
-					cmd.dma);
+			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+					  cmd.dma);
 			return status;
 		}
 		status = be_cmd_enable_magic_wol(adapter,
@@ -2206,7 +2216,7 @@
 		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
 	}
 
-	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
 	return status;
 }
 
@@ -2227,7 +2237,8 @@
 	for (vf = 0; vf < num_vfs; vf++) {
 		status = be_cmd_pmac_add(adapter, mac,
 					adapter->vf_cfg[vf].vf_if_handle,
-					&adapter->vf_cfg[vf].vf_pmac_id);
+					&adapter->vf_cfg[vf].vf_pmac_id,
+					vf + 1);
 		if (status)
 			dev_err(&adapter->pdev->dev,
 				"Mac address add failed for VF %d\n", vf);
@@ -2247,7 +2258,7 @@
 		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
 			be_cmd_pmac_del(adapter,
 					adapter->vf_cfg[vf].vf_if_handle,
-					adapter->vf_cfg[vf].vf_pmac_id);
+					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
 	}
 }
 
@@ -2279,22 +2290,26 @@
 		goto do_none;
 
 	if (be_physfn(adapter)) {
-		while (vf < num_vfs) {
-			cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
-					| BE_IF_FLAGS_BROADCAST;
-			status = be_cmd_if_create(adapter, cap_flags, en_flags,
-					mac, true,
+		if (adapter->sriov_enabled) {
+			while (vf < num_vfs) {
+				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
+							BE_IF_FLAGS_BROADCAST;
+				status = be_cmd_if_create(adapter, cap_flags,
+					en_flags, mac, true,
 					&adapter->vf_cfg[vf].vf_if_handle,
 					NULL, vf+1);
-			if (status) {
-				dev_err(&adapter->pdev->dev,
-				"Interface Create failed for VF %d\n", vf);
-				goto if_destroy;
+				if (status) {
+					dev_err(&adapter->pdev->dev,
+					"Interface Create failed for VF %d\n",
+					vf);
+					goto if_destroy;
+				}
+				adapter->vf_cfg[vf].vf_pmac_id =
+							BE_INVALID_PMAC_ID;
+				vf++;
 			}
-			adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
-			vf++;
 		}
-	} else if (!be_physfn(adapter)) {
+	} else {
 		status = be_cmd_mac_addr_query(adapter, mac,
 			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
 		if (!status) {
@@ -2315,44 +2330,46 @@
 	if (status != 0)
 		goto rx_qs_destroy;
 
-	if (be_physfn(adapter)) {
-		status = be_vf_eth_addr_config(adapter);
-		if (status)
-			goto mcc_q_destroy;
-	}
-
 	adapter->link_speed = -1;
 
 	return 0;
 
-mcc_q_destroy:
-	if (be_physfn(adapter))
-		be_vf_eth_addr_rem(adapter);
 	be_mcc_queues_destroy(adapter);
 rx_qs_destroy:
 	be_rx_queues_destroy(adapter);
 tx_qs_destroy:
 	be_tx_queues_destroy(adapter);
 if_destroy:
-	for (vf = 0; vf < num_vfs; vf++)
-		if (adapter->vf_cfg[vf].vf_if_handle)
-			be_cmd_if_destroy(adapter,
-					adapter->vf_cfg[vf].vf_if_handle);
-	be_cmd_if_destroy(adapter, adapter->if_handle);
+	if (be_physfn(adapter) && adapter->sriov_enabled)
+		for (vf = 0; vf < num_vfs; vf++)
+			if (adapter->vf_cfg[vf].vf_if_handle)
+				be_cmd_if_destroy(adapter,
+					adapter->vf_cfg[vf].vf_if_handle,
+					vf + 1);
+	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
 do_none:
 	return status;
 }
 
 static int be_clear(struct be_adapter *adapter)
 {
-	if (be_physfn(adapter))
+	int vf;
+
+	if (be_physfn(adapter) && adapter->sriov_enabled)
 		be_vf_eth_addr_rem(adapter);
 
 	be_mcc_queues_destroy(adapter);
 	be_rx_queues_destroy(adapter);
 	be_tx_queues_destroy(adapter);
 
-	be_cmd_if_destroy(adapter, adapter->if_handle);
+	if (be_physfn(adapter) && adapter->sriov_enabled)
+		for (vf = 0; vf < num_vfs; vf++)
+			if (adapter->vf_cfg[vf].vf_if_handle)
+				be_cmd_if_destroy(adapter,
+					adapter->vf_cfg[vf].vf_if_handle,
+					vf + 1);
+
+	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
 
 	/* tell fw we're done with firing cmds */
 	be_cmd_fw_clean(adapter);
@@ -2455,8 +2472,8 @@
 			continue;
 		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
 			(!be_flash_redboot(adapter, fw->data,
-			 pflashcomp[i].offset, pflashcomp[i].size,
-			 filehdr_size)))
+			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
+			(num_of_images * sizeof(struct image_hdr)))))
 			continue;
 		p = fw->data;
 		p += filehdr_size + pflashcomp[i].offset
@@ -2530,8 +2547,8 @@
 	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
 
 	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
-	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
-					&flash_cmd.dma);
+	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
+					  &flash_cmd.dma, GFP_KERNEL);
 	if (!flash_cmd.va) {
 		status = -ENOMEM;
 		dev_err(&adapter->pdev->dev,
@@ -2560,8 +2577,8 @@
 		status = -1;
 	}
 
-	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
-				flash_cmd.dma);
+	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
+			  flash_cmd.dma);
 	if (status) {
 		dev_err(&adapter->pdev->dev, "Firmware load error\n");
 		goto fw_exit;
@@ -2628,8 +2645,6 @@
 
 	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
 		BE_NAPI_WEIGHT);
-
-	netif_stop_queue(netdev);
 }
 
 static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -2704,13 +2719,13 @@
 	be_unmap_pci_bars(adapter);
 
 	if (mem->va)
-		pci_free_consistent(adapter->pdev, mem->size,
-			mem->va, mem->dma);
+		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+				  mem->dma);
 
 	mem = &adapter->mc_cmd_mem;
 	if (mem->va)
-		pci_free_consistent(adapter->pdev, mem->size,
-			mem->va, mem->dma);
+		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
+				  mem->dma);
 }
 
 static int be_ctrl_init(struct be_adapter *adapter)
@@ -2725,8 +2740,10 @@
 		goto done;
 
 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
-				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
+	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
+						mbox_mem_alloc->size,
+						&mbox_mem_alloc->dma,
+						GFP_KERNEL);
 	if (!mbox_mem_alloc->va) {
 		status = -ENOMEM;
 		goto unmap_pci_bars;
@@ -2738,8 +2755,9 @@
 	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
 
 	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
-	mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
-			&mc_cmd_mem->dma);
+	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
+					    mc_cmd_mem->size, &mc_cmd_mem->dma,
+					    GFP_KERNEL);
 	if (mc_cmd_mem->va == NULL) {
 		status = -ENOMEM;
 		goto free_mbox;
@@ -2755,8 +2773,8 @@
 	return 0;
 
 free_mbox:
-	pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
-		mbox_mem_alloc->va, mbox_mem_alloc->dma);
+	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
+			  mbox_mem_alloc->va, mbox_mem_alloc->dma);
 
 unmap_pci_bars:
 	be_unmap_pci_bars(adapter);
@@ -2770,8 +2788,8 @@
 	struct be_dma_mem *cmd = &adapter->stats_cmd;
 
 	if (cmd->va)
-		pci_free_consistent(adapter->pdev, cmd->size,
-			cmd->va, cmd->dma);
+		dma_free_coherent(&adapter->pdev->dev, cmd->size,
+				  cmd->va, cmd->dma);
 }
 
 static int be_stats_init(struct be_adapter *adapter)
@@ -2779,7 +2797,8 @@
 	struct be_dma_mem *cmd = &adapter->stats_cmd;
 
 	cmd->size = sizeof(struct be_cmd_req_get_stats);
-	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
+	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
+				     GFP_KERNEL);
 	if (cmd->va == NULL)
 		return -1;
 	memset(cmd->va, 0, cmd->size);
@@ -2849,6 +2868,10 @@
 	else
 		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
 
+	status = be_cmd_get_cntl_attributes(adapter);
+	if (status)
+		return status;
+
 	return 0;
 }
 
@@ -2922,11 +2945,11 @@
 	adapter->netdev = netdev;
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 
-	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
 	if (!status) {
 		netdev->features |= NETIF_F_HIGHDMA;
 	} else {
-		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (status) {
 			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
 			goto free_netdev;
@@ -2951,11 +2974,9 @@
 	if (status)
 		goto ctrl_clean;
 
-	if (be_physfn(adapter)) {
-		status = be_cmd_reset_function(adapter);
-		if (status)
-			goto ctrl_clean;
-	}
+	status = be_cmd_reset_function(adapter);
+	if (status)
+		goto ctrl_clean;
 
 	status = be_stats_init(adapter);
 	if (status)
@@ -2979,10 +3000,18 @@
 		goto unsetup;
 	netif_carrier_off(netdev);
 
+	if (be_physfn(adapter) && adapter->sriov_enabled) {
+		status = be_vf_eth_addr_config(adapter);
+		if (status)
+			goto unreg_netdev;
+	}
+
 	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
 	return 0;
 
+unreg_netdev:
+	unregister_netdev(netdev);
 unsetup:
 	be_clear(adapter);
 msix_disable:
@@ -3009,6 +3038,7 @@
 	struct be_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev =  adapter->netdev;
 
+	cancel_delayed_work_sync(&adapter->work);
 	if (adapter->wol)
 		be_setup_wol(adapter, true);
 
@@ -3021,6 +3051,7 @@
 	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
 	be_clear(adapter);
 
+	be_msix_disable(adapter);
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3042,6 +3073,7 @@
 	pci_set_power_state(pdev, 0);
 	pci_restore_state(pdev);
 
+	be_msix_enable(adapter);
 	/* tell fw we're ready to fire cmds */
 	status = be_cmd_fw_init(adapter);
 	if (status)
@@ -3057,6 +3089,8 @@
 
 	if (adapter->wol)
 		be_setup_wol(adapter, false);
+
+	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
 	return 0;
 }
 
@@ -3068,6 +3102,9 @@
 	struct be_adapter *adapter = pci_get_drvdata(pdev);
 	struct net_device *netdev =  adapter->netdev;
 
+	if (netif_running(netdev))
+		cancel_delayed_work_sync(&adapter->work);
+
 	netif_device_detach(netdev);
 
 	be_cmd_reset_function(adapter);
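The be_main.c hunks above all follow the same conversion: the pci_*_consistent helpers are replaced by the struct-device based dma_*_coherent API, which takes &pdev->dev plus an explicit GFP mask. A minimal sketch of the pattern, assuming a hypothetical driver-private buffer (my_buf_alloc/my_buf_free are illustrative names, not part of this patch):

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

struct my_buf {
	void		*va;	/* CPU address of the coherent buffer */
	dma_addr_t	dma;	/* bus address handed to the device */
	size_t		size;
};

/* Old style: buf->va = pci_alloc_consistent(pdev, size, &buf->dma);
 * New style: same semantics, but a struct device and a GFP mask. */
static int my_buf_alloc(struct pci_dev *pdev, struct my_buf *buf, size_t size)
{
	buf->size = size;
	buf->va = dma_alloc_coherent(&pdev->dev, size, &buf->dma, GFP_KERNEL);
	if (!buf->va)
		return -ENOMEM;
	return 0;
}

static void my_buf_free(struct pci_dev *pdev, struct my_buf *buf)
{
	if (buf->va)
		dma_free_coherent(&pdev->dev, buf->size, buf->va, buf->dma);
}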
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index fad9126..9f356d5 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -126,22 +126,22 @@
 		}
 		unmap_array[unmap_cons].skb = NULL;
 
-		pci_unmap_single(bnad->pcidev,
-				 pci_unmap_addr(&unmap_array[unmap_cons],
+		dma_unmap_single(&bnad->pcidev->dev,
+				 dma_unmap_addr(&unmap_array[unmap_cons],
 						dma_addr), skb_headlen(skb),
-						PCI_DMA_TODEVICE);
+						DMA_TO_DEVICE);
 
-		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
 		if (++unmap_cons >= unmap_q->q_depth)
 			break;
 
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			pci_unmap_page(bnad->pcidev,
-				       pci_unmap_addr(&unmap_array[unmap_cons],
+			dma_unmap_page(&bnad->pcidev->dev,
+				       dma_unmap_addr(&unmap_array[unmap_cons],
 						      dma_addr),
 				       skb_shinfo(skb)->frags[i].size,
-				       PCI_DMA_TODEVICE);
-			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+				       DMA_TO_DEVICE);
+			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
 					   0);
 			if (++unmap_cons >= unmap_q->q_depth)
 				break;
@@ -199,23 +199,23 @@
 		sent_bytes += skb->len;
 		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
 
-		pci_unmap_single(bnad->pcidev,
-				 pci_unmap_addr(&unmap_array[unmap_cons],
+		dma_unmap_single(&bnad->pcidev->dev,
+				 dma_unmap_addr(&unmap_array[unmap_cons],
 						dma_addr), skb_headlen(skb),
-				 PCI_DMA_TODEVICE);
-		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+				 DMA_TO_DEVICE);
+		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
 		BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
 
 		prefetch(&unmap_array[unmap_cons + 1]);
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			prefetch(&unmap_array[unmap_cons + 1]);
 
-			pci_unmap_page(bnad->pcidev,
-				       pci_unmap_addr(&unmap_array[unmap_cons],
+			dma_unmap_page(&bnad->pcidev->dev,
+				       dma_unmap_addr(&unmap_array[unmap_cons],
 						      dma_addr),
 				       skb_shinfo(skb)->frags[i].size,
-				       PCI_DMA_TODEVICE);
-			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+				       DMA_TO_DEVICE);
+			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
 					   0);
 			BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
 		}
@@ -340,19 +340,22 @@
 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_unmap_q *unmap_q;
+	struct bnad_skb_unmap *unmap_array;
 	struct sk_buff *skb;
 	int unmap_cons;
 
 	unmap_q = rcb->unmap_q;
+	unmap_array = unmap_q->unmap_array;
 	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
-		skb = unmap_q->unmap_array[unmap_cons].skb;
+		skb = unmap_array[unmap_cons].skb;
 		if (!skb)
 			continue;
-		unmap_q->unmap_array[unmap_cons].skb = NULL;
-		pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
-					unmap_array[unmap_cons],
-					dma_addr), rcb->rxq->buffer_size,
-					PCI_DMA_FROMDEVICE);
+		unmap_array[unmap_cons].skb = NULL;
+		dma_unmap_single(&bnad->pcidev->dev,
+				 dma_unmap_addr(&unmap_array[unmap_cons],
+						dma_addr),
+				 rcb->rxq->buffer_size,
+				 DMA_FROM_DEVICE);
 		dev_kfree_skb(skb);
 	}
 	bnad_reset_rcb(bnad, rcb);
@@ -391,9 +394,10 @@
 		skb->dev = bnad->netdev;
 		skb_reserve(skb, NET_IP_ALIGN);
 		unmap_array[unmap_prod].skb = skb;
-		dma_addr = pci_map_single(bnad->pcidev, skb->data,
-			rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
-		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+					  rcb->rxq->buffer_size,
+					  DMA_FROM_DEVICE);
+		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
 				   dma_addr);
 		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
 		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -434,8 +438,9 @@
 	struct bna_rcb *rcb = NULL;
 	unsigned int wi_range, packets = 0, wis = 0;
 	struct bnad_unmap_q *unmap_q;
+	struct bnad_skb_unmap *unmap_array;
 	struct sk_buff *skb;
-	u32 flags;
+	u32 flags, unmap_cons;
 	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 
@@ -456,17 +461,17 @@
 			rcb = ccb->rcb[1];
 
 		unmap_q = rcb->unmap_q;
+		unmap_array = unmap_q->unmap_array;
+		unmap_cons = unmap_q->consumer_index;
 
-		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		skb = unmap_array[unmap_cons].skb;
 		BUG_ON(!(skb));
-		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
-		pci_unmap_single(bnad->pcidev,
-				 pci_unmap_addr(&unmap_q->
-						unmap_array[unmap_q->
-							    consumer_index],
+		unmap_array[unmap_cons].skb = NULL;
+		dma_unmap_single(&bnad->pcidev->dev,
+				 dma_unmap_addr(&unmap_array[unmap_cons],
 						dma_addr),
-						rcb->rxq->buffer_size,
-						PCI_DMA_FROMDEVICE);
+				 rcb->rxq->buffer_size,
+				 DMA_FROM_DEVICE);
 		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
 
 		/* Should be more efficient ? Performance ? */
@@ -1015,9 +1020,9 @@
 			if (mem_info->mem_type == BNA_MEM_T_DMA) {
 				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
 						dma_pa);
-				pci_free_consistent(bnad->pcidev,
-						mem_info->mdl[i].len,
-						mem_info->mdl[i].kva, dma_pa);
+				dma_free_coherent(&bnad->pcidev->dev,
+						  mem_info->mdl[i].len,
+						  mem_info->mdl[i].kva, dma_pa);
 			} else
 				kfree(mem_info->mdl[i].kva);
 		}
@@ -1047,8 +1052,9 @@
 		for (i = 0; i < mem_info->num; i++) {
 			mem_info->mdl[i].len = mem_info->len;
 			mem_info->mdl[i].kva =
-				pci_alloc_consistent(bnad->pcidev,
-						mem_info->len, &dma_pa);
+				dma_alloc_coherent(&bnad->pcidev->dev,
+						mem_info->len, &dma_pa,
+						GFP_KERNEL);
 
 			if (mem_info->mdl[i].kva == NULL)
 				goto err_return;
@@ -2600,9 +2606,9 @@
 	unmap_q->unmap_array[unmap_prod].skb = skb;
 	BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
 	txqent->vector[vect_id].length = htons(skb_headlen(skb));
-	dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
-		PCI_DMA_TODEVICE);
-	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+				  skb_headlen(skb), DMA_TO_DEVICE);
+	dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
 			   dma_addr);
 
 	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@@ -2630,11 +2636,9 @@
 
 		BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
 		txqent->vector[vect_id].length = htons(size);
-		dma_addr =
-			pci_map_page(bnad->pcidev, frag->page,
-				     frag->page_offset, size,
-				     PCI_DMA_TODEVICE);
-		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+		dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
+					frag->page_offset, size, DMA_TO_DEVICE);
+		dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
 				   dma_addr);
 		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
 		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -3022,14 +3026,14 @@
 	err = pci_request_regions(pdev, BNAD_NAME);
 	if (err)
 		goto disable_device;
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		*using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev,
-						DMA_BIT_MASK(32));
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
 			if (err)
 				goto release_regions;
 		}
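The probe-time mask setup above keeps the usual try-64-bit-then-fall-back-to-32-bit logic, only through dma_set_mask()/dma_set_coherent_mask() on &pdev->dev. A minimal sketch of that sequence (my_set_dma_masks is a hypothetical helper, not part of this driver):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Try 64-bit streaming and coherent masks first, fall back to 32-bit. */
static int my_set_dma_masks(struct pci_dev *pdev, bool *using_dac)
{
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
		return 0;
	}

	*using_dac = false;
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;
	return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}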
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index 8b1d515..a89117f 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -181,7 +181,7 @@
 /* Unmap queues for Tx / Rx cleanup */
 struct bnad_skb_unmap {
 	struct sk_buff		*skb;
-	DECLARE_PCI_UNMAP_ADDR(dma_addr)
+	DEFINE_DMA_UNMAP_ADDR(dma_addr);
 };
 
 struct bnad_unmap_q {
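DEFINE_DMA_UNMAP_ADDR() reserves storage for the mapped address only on configurations that actually need it at unmap time, and it is meant to be paired with the dma_unmap_addr_set()/dma_unmap_addr() accessors used throughout the bnad hunks. A minimal sketch of that pairing (struct my_unmap and the helpers are illustrative):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct my_unmap {
	struct sk_buff	*skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
};

static dma_addr_t my_map_skb(struct device *dev, struct my_unmap *u,
			     struct sk_buff *skb)
{
	dma_addr_t dma = dma_map_single(dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);

	u->skb = skb;
	dma_unmap_addr_set(u, dma_addr, dma);	/* stash for the unmap path */
	return dma;
}

static void my_unmap_skb(struct device *dev, struct my_unmap *u)
{
	dma_unmap_single(dev, dma_unmap_addr(u, dma_addr),
			 skb_headlen(u->skb), DMA_TO_DEVICE);
	u->skb = NULL;
}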
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index df99edf..2a961b7 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -435,7 +435,8 @@
 	struct cnic_ctl_info info;
 
 	mutex_lock(&bp->cnic_lock);
-	c_ops = bp->cnic_ops;
+	c_ops = rcu_dereference_protected(bp->cnic_ops,
+					  lockdep_is_held(&bp->cnic_lock));
 	if (c_ops) {
 		info.cmd = CNIC_CTL_STOP_CMD;
 		c_ops->cnic_ctl(bp->cnic_data, &info);
@@ -450,7 +451,8 @@
 	struct cnic_ctl_info info;
 
 	mutex_lock(&bp->cnic_lock);
-	c_ops = bp->cnic_ops;
+	c_ops = rcu_dereference_protected(bp->cnic_ops,
+					  lockdep_is_held(&bp->cnic_lock));
 	if (c_ops) {
 		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
 			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@@ -7553,6 +7555,10 @@
 	    !(data & ETH_FLAG_RXVLAN))
 		return -EINVAL;
 
+	/* TSO with VLAN tag won't work with current firmware */
+	if (!(data & ETH_FLAG_TXVLAN))
+		return -EINVAL;
+
 	rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN |
 				  ETH_FLAG_TXVLAN);
 	if (rc)
@@ -7962,11 +7968,8 @@
 
 		/* AER (Advanced Error Reporting) hooks */
 		err = pci_enable_pcie_error_reporting(pdev);
-		if (err) {
-			dev_err(&pdev->dev, "pci_enable_pcie_error_reporting "
-					    "failed 0x%x\n", err);
-			/* non-fatal, continue */
-		}
+		if (!err)
+			bp->flags |= BNX2_FLAG_AER_ENABLED;
 
 	} else {
 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
@@ -8229,8 +8232,10 @@
 	return 0;
 
 err_out_unmap:
-	if (bp->flags & BNX2_FLAG_PCIE)
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
 		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
 
 	if (bp->regview) {
 		iounmap(bp->regview);
@@ -8312,7 +8317,7 @@
 #endif
 };
 
-static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
+static void inline vlan_features_add(struct net_device *dev, u32 flags)
 {
 	dev->vlan_features |= flags;
 }
@@ -8418,8 +8423,10 @@
 
 	kfree(bp->temp_stats_blk);
 
-	if (bp->flags & BNX2_FLAG_PCIE)
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
 		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
 
 	free_netdev(dev);
 
@@ -8535,7 +8542,7 @@
 	}
 	rtnl_unlock();
 
-	if (!(bp->flags & BNX2_FLAG_PCIE))
+	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
 		return result;
 
 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 5488a2e..7a5e88f 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6207,6 +6207,8 @@
 
 #define BNX2_CP_SCRATCH					0x001a0000
 
+#define BNX2_FW_MAX_ISCSI_CONN				 0x001a0080
+
 
 /*
  *  mcp_reg definition
@@ -6741,6 +6743,7 @@
 #define BNX2_FLAG_JUMBO_BROKEN		0x00000800
 #define BNX2_FLAG_CAN_KEEP_VLAN		0x00001000
 #define BNX2_FLAG_BROKEN_STATS		0x00002000
+#define BNX2_FLAG_AER_ENABLED		0x00004000
 
 	struct bnx2_napi	bnx2_napi[BNX2_MAX_MSIX_VEC];
 
@@ -6758,7 +6761,7 @@
 	u32		tx_wake_thresh;
 
 #ifdef BCM_CNIC
-	struct cnic_ops		*cnic_ops;
+	struct cnic_ops	__rcu	*cnic_ops;
 	void			*cnic_data;
 #endif
 
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index a6cd335..c0dd30d 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,8 +22,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.62.00-3"
-#define DRV_MODULE_RELDATE      "2010/12/21"
+#define DRV_MODULE_VERSION      "1.62.11-0"
+#define DRV_MODULE_RELDATE      "2011/01/31"
 #define BNX2X_BC_VER            0x040200
 
 #define BNX2X_MULTI_QUEUE
@@ -129,6 +129,7 @@
 #endif
 
 #define bnx2x_mc_addr(ha)      ((ha)->addr)
+#define bnx2x_uc_addr(ha)      ((ha)->addr)
 
 #define U64_LO(x)			(u32)(((u64)(x)) & 0xffffffff)
 #define U64_HI(x)			(u32)(((u64)(x)) >> 32)
@@ -341,6 +342,8 @@
 	/* chip independed shortcut into rx_prods_offset memory */
 	u32			ustorm_rx_prods_offset;
 
+	u32			rx_buf_size;
+
 	dma_addr_t		status_blk_mapping;
 
 	struct sw_tx_bd		*tx_buf_ring;
@@ -428,6 +431,10 @@
 };
 
 #define bnx2x_fp(bp, nr, var)		(bp->fp[nr].var)
+
+/* Use 2500 as a mini-jumbo MTU for FCoE */
+#define BNX2X_FCOE_MINI_JUMBO_MTU	2500
+
 #ifdef BCM_CNIC
 /* FCoE L2 `fastpath' is right after the eth entries */
 #define FCOE_IDX			BNX2X_NUM_ETH_QUEUES(bp)
@@ -810,6 +817,7 @@
 	struct eth_stats_query		fw_stats;
 	struct mac_configuration_cmd	mac_config;
 	struct mac_configuration_cmd	mcast_config;
+	struct mac_configuration_cmd	uc_mac_config;
 	struct client_init_ramrod_data	client_init_data;
 
 	/* used by dmae command executer */
@@ -911,7 +919,6 @@
 	int			tx_ring_size;
 
 	u32			rx_csum;
-	u32			rx_buf_size;
 /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
 #define ETH_OVREHEAD		(ETH_HLEN + 8 + 8)
 #define ETH_MIN_PACKET_SIZE		60
@@ -939,7 +946,7 @@
 	struct eth_spe		*spq_prod_bd;
 	struct eth_spe		*spq_last_bd;
 	__le16			*dsb_sp_prod;
-	atomic_t		spq_left; /* serialize spq */
+	atomic_t		cq_spq_left; /* ETH_XXX ramrods credit */
 	/* used to synchronize spq accesses */
 	spinlock_t		spq_lock;
 
@@ -949,6 +956,7 @@
 	u16			eq_prod;
 	u16			eq_cons;
 	__le16			*eq_cons_sb;
+	atomic_t		eq_spq_left; /* COMMON_XXX ramrods credit */
 
 	/* Flags for marking that there is a STAT_QUERY or
 	   SET_MAC ramrod pending */
@@ -976,8 +984,12 @@
 #define MF_FUNC_DIS			0x1000
 #define FCOE_MACS_SET			0x2000
 #define NO_FCOE_FLAG			0x4000
+#define NO_ISCSI_OOO_FLAG		0x8000
+#define NO_ISCSI_FLAG			0x10000
 
 #define NO_FCOE(bp)		((bp)->flags & NO_FCOE_FLAG)
+#define NO_ISCSI(bp)		((bp)->flags & NO_ISCSI_FLAG)
+#define NO_ISCSI_OOO(bp)	((bp)->flags & NO_ISCSI_OOO_FLAG)
 
 	int			pf_num;	/* absolute PF number */
 	int			pfid;	/* per-path PF number */
@@ -1064,6 +1076,7 @@
 	int			num_queues;
 	int			disable_tpa;
 	int			int_mode;
+	u32			*rx_indir_table;
 
 	struct tstorm_eth_mac_filter_config	mac_filters;
 #define BNX2X_ACCEPT_NONE		0x0000
@@ -1110,7 +1123,7 @@
 #define BNX2X_CNIC_FLAG_MAC_SET		1
 	void			*t2;
 	dma_addr_t		t2_mapping;
-	struct cnic_ops		*cnic_ops;
+	struct cnic_ops	__rcu	*cnic_ops;
 	void			*cnic_data;
 	u32			cnic_tag;
 	struct cnic_eth_dev	cnic_eth_dev;
@@ -1125,13 +1138,12 @@
 	u16			cnic_kwq_pending;
 	u16			cnic_spq_pending;
 	struct mutex		cnic_mutex;
-	u8			iscsi_mac[ETH_ALEN];
 	u8			fip_mac[ETH_ALEN];
 #endif
 
 	int			dmae_ready;
 	/* used to synchronize dmae accesses */
-	struct mutex		dmae_mutex;
+	spinlock_t		dmae_lock;
 
 	/* used to protect the FW mail box */
 	struct mutex		fw_mb_mutex;
@@ -1447,6 +1459,12 @@
 void bnx2x_calc_fc_adv(struct bnx2x *bp);
 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 		  u32 data_hi, u32 data_lo, int common);
+
+/* Clears multicast and unicast list configuration in the chip. */
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp);
+void bnx2x_invalidate_uc_list(struct bnx2x *bp);
+
 void bnx2x_update_coalesce(struct bnx2x *bp);
 int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
 
@@ -1782,5 +1800,6 @@
 BNX2X_EXTERN int load_count[2][3]; /* per path: 0-common, 1-port0, 2-port1 */
 
 extern void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_push_indir_table(struct bnx2x *bp);
 
 #endif /* bnx2x.h */
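Annotating cnic_ops with __rcu (here and in bnx2.h) lets sparse check that the pointer is only read through the RCU accessors, while rcu_dereference_protected(..., lockdep_is_held(&lock)) in the bnx2.c hunks documents, and lets lockdep verify, that the update-side lock is held instead of an RCU read section. A minimal sketch of the pattern (my_ops/my_lock are illustrative names):

#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/lockdep.h>

struct my_ops { void (*ctl)(void *data, int cmd); };

static struct my_ops __rcu *my_ops_ptr;
static DEFINE_MUTEX(my_lock);

static void my_stop(void *data)
{
	struct my_ops *ops;

	mutex_lock(&my_lock);
	/* Safe without rcu_read_lock(): the updater always holds my_lock,
	 * and lockdep_is_held() lets lockdep check that claim. */
	ops = rcu_dereference_protected(my_ops_ptr, lockdep_is_held(&my_lock));
	if (ops)
		ops->ctl(data, 0);
	mutex_unlock(&my_lock);
}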
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 710ce5d..6fac8e1 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -232,7 +232,7 @@
 	/* move empty skb from pool to prod and map it */
 	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
 	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
-				 bp->rx_buf_size, DMA_FROM_DEVICE);
+				 fp->rx_buf_size, DMA_FROM_DEVICE);
 	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
 	/* move partial skb from cons to pool (don't unmap yet) */
@@ -333,13 +333,13 @@
 	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
 	struct sk_buff *skb = rx_buf->skb;
 	/* alloc new skb */
-	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
-			 bp->rx_buf_size, DMA_FROM_DEVICE);
+			 fp->rx_buf_size, DMA_FROM_DEVICE);
 
 	if (likely(new_skb)) {
 		/* fix ip xsum and give it to the stack */
@@ -349,10 +349,10 @@
 		prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 
 #ifdef BNX2X_STOP_ON_ERROR
-		if (pad + len > bp->rx_buf_size) {
+		if (pad + len > fp->rx_buf_size) {
 			BNX2X_ERR("skb_put is about to fail...  "
 				  "pad %d  len %d  rx_buf_size %d\n",
-				  pad, len, bp->rx_buf_size);
+				  pad, len, fp->rx_buf_size);
 			bnx2x_panic();
 			return;
 		}
@@ -582,7 +582,7 @@
 			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
 				dma_unmap_single(&bp->pdev->dev,
 					dma_unmap_addr(rx_buf, mapping),
-						 bp->rx_buf_size,
+						 fp->rx_buf_size,
 						 DMA_FROM_DEVICE);
 				skb_reserve(skb, pad);
 				skb_put(skb, len);
@@ -821,19 +821,16 @@
 	u16 ring_prod;
 	int i, j;
 
-	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
-		IP_HEADER_ALIGNMENT_PADDING;
-
-	DP(NETIF_MSG_IFUP,
-	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
-
 	for_each_rx_queue(bp, j) {
 		struct bnx2x_fastpath *fp = &bp->fp[j];
 
+		DP(NETIF_MSG_IFUP,
+		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
+
 		if (!fp->disable_tpa) {
 			for (i = 0; i < max_agg_queues; i++) {
 				fp->tpa_pool[i].skb =
-				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+				   netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 				if (!fp->tpa_pool[i].skb) {
 					BNX2X_ERR("Failed to allocate TPA "
 						  "skb pool for queue[%d] - "
@@ -941,7 +938,7 @@
 
 			dma_unmap_single(&bp->pdev->dev,
 					 dma_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, DMA_FROM_DEVICE);
+					 fp->rx_buf_size, DMA_FROM_DEVICE);
 
 			rx_buf->skb = NULL;
 			dev_kfree_skb(skb);
@@ -1249,6 +1246,31 @@
 	return rc;
 }
 
+static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
+		if (IS_FCOE_IDX(i))
+			/*
+			 * Although no IP frames are expected to arrive at
+			 * this ring, we still add an
+			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
+			 * overrun attack.
+			 */
+			fp->rx_buf_size =
+				BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
+				BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+		else
+			fp->rx_buf_size =
+				bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
+				IP_HEADER_ALIGNMENT_PADDING;
+	}
+}
+
 /* must be called with rtnl_lock */
 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 {
@@ -1272,6 +1294,9 @@
 	/* must be called before memory allocation and HW init */
 	bnx2x_ilt_set_info(bp);
 
+	/* Set the receive queues buffer size */
+	bnx2x_set_rx_buf_size(bp);
+
 	if (bnx2x_alloc_mem(bp))
 		return -ENOMEM;
 
@@ -1427,28 +1452,35 @@
 
 	bnx2x_set_eth_mac(bp, 1);
 
+	/* Clear MC configuration */
+	if (CHIP_IS_E1(bp))
+		bnx2x_invalidate_e1_mc_list(bp);
+	else
+		bnx2x_invalidate_e1h_mc_list(bp);
+
+	/* Clear UC lists configuration */
+	bnx2x_invalidate_uc_list(bp);
+
 	if (bp->port.pmf)
 		bnx2x_initial_phy_init(bp, load_mode);
 
+	/* Initialize Rx filtering */
+	bnx2x_set_rx_mode(bp->dev);
+
 	/* Start fast path */
 	switch (load_mode) {
 	case LOAD_NORMAL:
 		/* Tx queue should be only reenabled */
 		netif_tx_wake_all_queues(bp->dev);
 		/* Initialize the receive filter. */
-		bnx2x_set_rx_mode(bp->dev);
 		break;
 
 	case LOAD_OPEN:
 		netif_tx_start_all_queues(bp->dev);
 		smp_mb__after_clear_bit();
-		/* Initialize the receive filter. */
-		bnx2x_set_rx_mode(bp->dev);
 		break;
 
 	case LOAD_DIAG:
-		/* Initialize the receive filter. */
-		bnx2x_set_rx_mode(bp->dev);
 		bp->state = BNX2X_STATE_DIAG;
 		break;
 
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 03eb4d6..f062d5d 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -822,11 +822,11 @@
 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
 	dma_addr_t mapping;
 
-	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 	if (unlikely(skb == NULL))
 		return -ENOMEM;
 
-	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
 				 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		dev_kfree_skb(skb);
@@ -892,7 +892,7 @@
 		if (fp->tpa_state[i] == BNX2X_TPA_START)
 			dma_unmap_single(&bp->pdev->dev,
 					 dma_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, DMA_FROM_DEVICE);
+					 fp->rx_buf_size, DMA_FROM_DEVICE);
 
 		dev_kfree_skb(skb);
 		rx_buf->skb = NULL;
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 5b44a8b..8d19d12 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -1618,7 +1618,7 @@
 	/* prepare the loopback packet */
 	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
 		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
-	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+	skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
 	if (!skb) {
 		rc = -ENOMEM;
 		goto test_loopback_exit;
@@ -2134,6 +2134,59 @@
 	return 0;
 }
 
+static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+			   void *rules __always_unused)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = BNX2X_NUM_ETH_QUEUES(bp);
+		return 0;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev,
+				struct ethtool_rxfh_indir *indir)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	size_t copy_size =
+		min_t(size_t, indir->size, TSTORM_INDIRECTION_TABLE_SIZE);
+
+	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+		return -EOPNOTSUPP;
+
+	indir->size = TSTORM_INDIRECTION_TABLE_SIZE;
+	memcpy(indir->ring_index, bp->rx_indir_table,
+	       copy_size * sizeof(bp->rx_indir_table[0]));
+	return 0;
+}
+
+static int bnx2x_set_rxfh_indir(struct net_device *dev,
+				const struct ethtool_rxfh_indir *indir)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	size_t i;
+
+	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+		return -EOPNOTSUPP;
+
+	/* Validate size and indices */
+	if (indir->size != TSTORM_INDIRECTION_TABLE_SIZE)
+		return -EINVAL;
+	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+		if (indir->ring_index[i] >= BNX2X_NUM_ETH_QUEUES(bp))
+			return -EINVAL;
+
+	memcpy(bp->rx_indir_table, indir->ring_index,
+	       indir->size * sizeof(bp->rx_indir_table[0]));
+	bnx2x_push_indir_table(bp);
+	return 0;
+}
+
 static const struct ethtool_ops bnx2x_ethtool_ops = {
 	.get_settings		= bnx2x_get_settings,
 	.set_settings		= bnx2x_set_settings,
@@ -2170,6 +2223,9 @@
 	.get_strings		= bnx2x_get_strings,
 	.phys_id		= bnx2x_phys_id,
 	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
+	.get_rxnfc		= bnx2x_get_rxnfc,
+	.get_rxfh_indir		= bnx2x_get_rxfh_indir,
+	.set_rxfh_indir		= bnx2x_set_rxfh_indir,
 };
 
 void bnx2x_set_ethtool_ops(struct net_device *netdev)
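The new get/set_rxfh_indir hooks expose the RSS indirection table: each of its TSTORM_INDIRECTION_TABLE_SIZE entries names the ETH queue a given hash bucket is steered to, which is why every entry is validated against BNX2X_NUM_ETH_QUEUES(bp) before the table is pushed. Conceptually the hardware lookup reduces to the sketch below (table size and helper name are illustrative, not the driver's):

#include <linux/types.h>

#define MY_INDIR_TABLE_SIZE	128	/* illustrative size only */

/* Map a packet's RSS hash to the RX queue that will receive it. */
static inline u16 my_rss_pick_queue(const u32 *indir_table, u32 rss_hash)
{
	return indir_table[rss_hash % MY_INDIR_TABLE_SIZE];
}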
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 6238d4f..be503cc 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -11,21 +11,28 @@
 
 #include "bnx2x_fw_defs.h"
 
+#define FW_ENCODE_32BIT_PATTERN		0x1e1e1e1e
+
 struct license_key {
 	u32 reserved[6];
 
-#if defined(__BIG_ENDIAN)
-	u16 max_iscsi_init_conn;
-	u16 max_iscsi_trgt_conn;
-#elif defined(__LITTLE_ENDIAN)
-	u16 max_iscsi_trgt_conn;
-	u16 max_iscsi_init_conn;
-#endif
+	u32 max_iscsi_conn;
+#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK	0xFFFF
+#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT	0
+#define BNX2X_MAX_ISCSI_INIT_CONN_MASK	0xFFFF0000
+#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT	16
 
-	u32 reserved_a[6];
+	u32 reserved_a;
+
+	u32 max_fcoe_conn;
+#define BNX2X_MAX_FCOE_TRGT_CONN_MASK	0xFFFF
+#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT	0
+#define BNX2X_MAX_FCOE_INIT_CONN_MASK	0xFFFF0000
+#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT	16
+
+	u32 reserved_b[4];
 };
 
-
 #define PORT_0				0
 #define PORT_1				1
 #define PORT_MAX			2
@@ -237,8 +244,26 @@
 #define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT	      16
 
 
-	u32 Reserved0[16];				    /* 0x158 */
+	u32 Reserved0[3];				    /* 0x158 */
+	/*	Controls the TX laser of the SFP+ module */
+	u32 sfp_ctrl;					/* 0x164 */
+#define PORT_HW_CFG_TX_LASER_MASK			      0x000000FF
+#define PORT_HW_CFG_TX_LASER_SHIFT			      0
+#define PORT_HW_CFG_TX_LASER_MDIO			      0x00000000
+#define PORT_HW_CFG_TX_LASER_GPIO0			      0x00000001
+#define PORT_HW_CFG_TX_LASER_GPIO1			      0x00000002
+#define PORT_HW_CFG_TX_LASER_GPIO2			      0x00000003
+#define PORT_HW_CFG_TX_LASER_GPIO3			      0x00000004
 
+    /*	Controls the fault module LED of the SFP+ */
+#define PORT_HW_CFG_FAULT_MODULE_LED_MASK		      0x0000FF00
+#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT		      8
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0		      0x00000000
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1		      0x00000100
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2		      0x00000200
+#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3		      0x00000300
+#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED		      0x00000400
+	u32 Reserved01[12];				    /* 0x158 */
 	/*  for external PHY, or forced mode or during AN */
 	u16 xgxs_config_rx[4];				    /* 0x198 */
 
@@ -246,12 +271,78 @@
 
 	u32 Reserved1[56];				    /* 0x1A8 */
 	u32 default_cfg;				    /* 0x288 */
+#define PORT_HW_CFG_GPIO0_CONFIG_MASK			      0x00000003
+#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT			      0
+#define PORT_HW_CFG_GPIO0_CONFIG_NA			      0x00000000
+#define PORT_HW_CFG_GPIO0_CONFIG_LOW			      0x00000001
+#define PORT_HW_CFG_GPIO0_CONFIG_HIGH			      0x00000002
+#define PORT_HW_CFG_GPIO0_CONFIG_INPUT			      0x00000003
+
+#define PORT_HW_CFG_GPIO1_CONFIG_MASK			      0x0000000C
+#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT			      2
+#define PORT_HW_CFG_GPIO1_CONFIG_NA			      0x00000000
+#define PORT_HW_CFG_GPIO1_CONFIG_LOW			      0x00000004
+#define PORT_HW_CFG_GPIO1_CONFIG_HIGH			      0x00000008
+#define PORT_HW_CFG_GPIO1_CONFIG_INPUT			      0x0000000c
+
+#define PORT_HW_CFG_GPIO2_CONFIG_MASK			      0x00000030
+#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT			      4
+#define PORT_HW_CFG_GPIO2_CONFIG_NA			      0x00000000
+#define PORT_HW_CFG_GPIO2_CONFIG_LOW			      0x00000010
+#define PORT_HW_CFG_GPIO2_CONFIG_HIGH			      0x00000020
+#define PORT_HW_CFG_GPIO2_CONFIG_INPUT			      0x00000030
+
+#define PORT_HW_CFG_GPIO3_CONFIG_MASK			      0x000000C0
+#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT			      6
+#define PORT_HW_CFG_GPIO3_CONFIG_NA			      0x00000000
+#define PORT_HW_CFG_GPIO3_CONFIG_LOW			      0x00000040
+#define PORT_HW_CFG_GPIO3_CONFIG_HIGH			      0x00000080
+#define PORT_HW_CFG_GPIO3_CONFIG_INPUT			      0x000000c0
+
+	/*
+	 * When the KR link must be forced to a mode that is not
+	 * KR-compliant, this parameter determines what triggers it.
+	 * When a GPIO is selected, a low input will force the speed.
+	 * Currently the default speed is 1G. In the future this may be
+	 * widened to select the forced speed with another parameter.
+	 * Note that when force-1G is enabled, it overrides option 56:
+	 * Link Speed option.
+	 */
+#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK		      0x00000F00
+#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT		      8
+#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED		      0x00000000
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0		      0x00000100
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0		      0x00000200
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0		      0x00000300
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0		      0x00000400
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1		      0x00000500
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1		      0x00000600
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1		      0x00000700
+#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1		      0x00000800
+#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED		      0x00000900
+    /*	Selects which GPIO (if any) is used to reset the external phy */
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK		      0x000F0000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT		      16
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE		      0x00000000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0		      0x00010000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0		      0x00020000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0		      0x00030000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0		      0x00040000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1		      0x00050000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1		      0x00060000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1		      0x00070000
+#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1		      0x00080000
 	/*  Enable BAM on KR */
 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK		      0x00100000
 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT		      20
 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED		      0x00000000
 #define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED		      0x00100000
 
+	/*  Enable Common Mode Sense */
+#define PORT_HW_CFG_ENABLE_CMS_MASK			      0x00200000
+#define PORT_HW_CFG_ENABLE_CMS_SHIFT			      21
+#define PORT_HW_CFG_ENABLE_CMS_DISABLED			      0x00000000
+#define PORT_HW_CFG_ENABLE_CMS_ENABLED			      0x00200000
+
 	u32 speed_capability_mask2;			    /* 0x28C */
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK		      0x0000FFFF
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT		      0
@@ -352,6 +443,10 @@
 #define PORT_HW_CFG_LANE_SWAP_CFG_31203120	    0x0000d8d8
 	/* forced only */
 #define PORT_HW_CFG_LANE_SWAP_CFG_32103210	    0x0000e4e4
+    /*	Indicates whether to swap the external phy polarity */
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK	       0x00010000
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED	    0x00000000
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED	    0x00010000
 
 	u32 external_phy_config;
 #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK	    0xff000000
@@ -377,6 +472,7 @@
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727	    0x00000900
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC   0x00000a00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823	    0x00000b00
+#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833	    0x00000d00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE	    0x0000fd00
 #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN	    0x0000ff00
 
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 43b0de2..f2f367d 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
-/* Copyright 2008-2009 Broadcom Corporation
+/* Copyright 2008-2011 Broadcom Corporation
  *
  * Unless you and Broadcom execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
@@ -28,12 +28,13 @@
 
 /********************************************************/
 #define ETH_HLEN			14
-#define ETH_OVREHEAD		(ETH_HLEN + 8 + 8)/* 16 for CRC + VLAN + LLC */
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD			(ETH_HLEN + 8 + 8)
 #define ETH_MIN_PACKET_SIZE		60
 #define ETH_MAX_PACKET_SIZE		1500
 #define ETH_MAX_JUMBO_PACKET_SIZE	9600
 #define MDIO_ACCESS_TIMEOUT		1000
-#define BMAC_CONTROL_RX_ENABLE	2
+#define BMAC_CONTROL_RX_ENABLE		2
 
 /***********************************************************/
 /*			Shortcut definitions		   */
@@ -79,7 +80,7 @@
 
 #define AUTONEG_CL37		SHARED_HW_CFG_AN_ENABLE_CL37
 #define AUTONEG_CL73		SHARED_HW_CFG_AN_ENABLE_CL73
-#define AUTONEG_BAM 		SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_BAM		SHARED_HW_CFG_AN_ENABLE_BAM
 #define AUTONEG_PARALLEL \
 				SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
 #define AUTONEG_SGMII_FIBER_AUTODET \
@@ -112,10 +113,10 @@
 #define GP_STATUS_10G_KX4 \
 			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
 
-#define LINK_10THD			LINK_STATUS_SPEED_AND_DUPLEX_10THD
-#define LINK_10TFD			LINK_STATUS_SPEED_AND_DUPLEX_10TFD
+#define LINK_10THD		LINK_STATUS_SPEED_AND_DUPLEX_10THD
+#define LINK_10TFD		LINK_STATUS_SPEED_AND_DUPLEX_10TFD
 #define LINK_100TXHD		LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
-#define LINK_100T4			LINK_STATUS_SPEED_AND_DUPLEX_100T4
+#define LINK_100T4		LINK_STATUS_SPEED_AND_DUPLEX_100T4
 #define LINK_100TXFD		LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
 #define LINK_1000THD		LINK_STATUS_SPEED_AND_DUPLEX_1000THD
 #define LINK_1000TFD		LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
@@ -123,18 +124,18 @@
 #define LINK_2500THD		LINK_STATUS_SPEED_AND_DUPLEX_2500THD
 #define LINK_2500TFD		LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
 #define LINK_2500XFD		LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
-#define LINK_10GTFD			LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
-#define LINK_10GXFD			LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
-#define LINK_12GTFD			LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
-#define LINK_12GXFD			LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
+#define LINK_10GTFD		LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
+#define LINK_10GXFD		LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
+#define LINK_12GTFD		LINK_STATUS_SPEED_AND_DUPLEX_12GTFD
+#define LINK_12GXFD		LINK_STATUS_SPEED_AND_DUPLEX_12GXFD
 #define LINK_12_5GTFD		LINK_STATUS_SPEED_AND_DUPLEX_12_5GTFD
 #define LINK_12_5GXFD		LINK_STATUS_SPEED_AND_DUPLEX_12_5GXFD
-#define LINK_13GTFD			LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
-#define LINK_13GXFD			LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
-#define LINK_15GTFD			LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
-#define LINK_15GXFD			LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
-#define LINK_16GTFD			LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
-#define LINK_16GXFD			LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
+#define LINK_13GTFD		LINK_STATUS_SPEED_AND_DUPLEX_13GTFD
+#define LINK_13GXFD		LINK_STATUS_SPEED_AND_DUPLEX_13GXFD
+#define LINK_15GTFD		LINK_STATUS_SPEED_AND_DUPLEX_15GTFD
+#define LINK_15GXFD		LINK_STATUS_SPEED_AND_DUPLEX_15GXFD
+#define LINK_16GTFD		LINK_STATUS_SPEED_AND_DUPLEX_16GTFD
+#define LINK_16GXFD		LINK_STATUS_SPEED_AND_DUPLEX_16GXFD
 
 #define PHY_XGXS_FLAG			0x1
 #define PHY_SGMII_FLAG			0x2
@@ -142,7 +143,7 @@
 
 /* */
 #define SFP_EEPROM_CON_TYPE_ADDR		0x2
-	#define SFP_EEPROM_CON_TYPE_VAL_LC 		0x7
+	#define SFP_EEPROM_CON_TYPE_VAL_LC	0x7
 	#define SFP_EEPROM_CON_TYPE_VAL_COPPER	0x21
 
 
@@ -153,15 +154,15 @@
 
 #define SFP_EEPROM_FC_TX_TECH_ADDR		0x8
 	#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
-	#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE	 0x8
+	#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE  0x8
 
-#define SFP_EEPROM_OPTIONS_ADDR 		0x40
+#define SFP_EEPROM_OPTIONS_ADDR			0x40
 	#define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
-#define SFP_EEPROM_OPTIONS_SIZE 		2
+#define SFP_EEPROM_OPTIONS_SIZE			2
 
-#define EDC_MODE_LINEAR	 			0x0022
-#define EDC_MODE_LIMITING	 			0x0044
-#define EDC_MODE_PASSIVE_DAC 			0x0055
+#define EDC_MODE_LINEAR				0x0022
+#define EDC_MODE_LIMITING				0x0044
+#define EDC_MODE_PASSIVE_DAC			0x0055
 
 
 #define ETS_BW_LIMIT_CREDIT_UPPER_BOUND		(0x5000)
@@ -170,24 +171,18 @@
 /*                     INTERFACE                          */
 /**********************************************************/
 
-#define CL45_WR_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
 	bnx2x_cl45_write(_bp, _phy, \
 		(_phy)->def_md_devad, \
 		(_bank + (_addr & 0xf)), \
 		_val)
 
-#define CL45_RD_OVER_CL22(_bp, _phy, _bank, _addr, _val) \
+#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
 	bnx2x_cl45_read(_bp, _phy, \
 		(_phy)->def_md_devad, \
 		(_bank + (_addr & 0xf)), \
 		_val)
 
-static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
-			  u8 devad, u16 reg, u16 *ret_val);
-
-static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
-			   u8 devad, u16 reg, u16 val);
-
 static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
 {
 	u32 val = REG_RD(bp, reg);
@@ -216,7 +211,7 @@
 
 	DP(NETIF_MSG_LINK, "ETS disabled configuration\n");
 
-	/**
+	/*
 	 * mapping between entry  priority to client number (0,1,2 -debug and
 	 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
 	 * 3bits client num.
@@ -225,7 +220,7 @@
 	 */
 
 	REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
-	/**
+	/*
 	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
 	 * as strict.  Bits 0,1,2 - debug and management entries, 3 -
 	 * COS0 entry, 4 - COS1 entry.
@@ -237,12 +232,12 @@
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
 	/* defines which entries (clients) are subjected to WFQ arbitration */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
-	/**
-	* For strict priority entries defines the number of consecutive
-	* slots for the highest priority.
-	*/
+	/*
+	 * For strict priority entries defines the number of consecutive
+	 * slots for the highest priority.
+	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
-	/**
+	/*
 	 * mapping between the CREDIT_WEIGHT registers and actual client
 	 * numbers
 	 */
@@ -255,7 +250,7 @@
 	REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
 	/* ETS mode disable */
 	REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
-	/**
+	/*
 	 * If ETS mode is enabled (there is no strict priority) defines a WFQ
 	 * weight for COS0/COS1.
 	 */
@@ -268,24 +263,24 @@
 	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
 }
 
-void bnx2x_ets_bw_limit_common(const struct link_params *params)
+static void bnx2x_ets_bw_limit_common(const struct link_params *params)
 {
 	/* ETS disabled configuration */
 	struct bnx2x *bp = params->bp;
 	DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
-	/**
-	* defines which entries (clients) are subjected to WFQ arbitration
-	* COS0 0x8
-	* COS1 0x10
-	*/
+	/*
+	 * defines which entries (clients) are subjected to WFQ arbitration
+	 * COS0 0x8
+	 * COS1 0x10
+	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
-	/**
-	* mapping between the ARB_CREDIT_WEIGHT registers and actual
-	* client numbers (WEIGHT_0 does not actually have to represent
-	* client 0)
-	*    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
-	*  cos1-001     cos0-000     dbg1-100     dbg0-011     MCP-010
-	*/
+	/*
+	 * mapping between the ARB_CREDIT_WEIGHT registers and actual
+	 * client numbers (WEIGHT_0 does not actually have to represent
+	 * client 0)
+	 *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
+	 *  cos1-001     cos0-000     dbg1-100     dbg0-011     MCP-010
+	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
 
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
@@ -298,14 +293,14 @@
 
 	/* Defines the number of consecutive slots for the strict priority */
 	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
-	/**
-	* Bitmap of 5bits length. Each bit specifies whether the entry behaves
-	* as strict.  Bits 0,1,2 - debug and management entries, 3 - COS0
-	* entry, 4 - COS1 entry.
-	* COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
-	* bit4   bit3	  bit2     bit1	   bit0
-	* MCP and debug are strict
-	*/
+	/*
+	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+	 * as strict.  Bits 0,1,2 - debug and management entries, 3 - COS0
+	 * entry, 4 - COS1 entry.
+	 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
+	 * bit4   bit3	  bit2     bit1	   bit0
+	 * MCP and debug are strict
+	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
 
 	/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
@@ -329,8 +324,7 @@
 	if ((0 == total_bw) ||
 	    (0 == cos0_bw) ||
 	    (0 == cos1_bw)) {
-		DP(NETIF_MSG_LINK,
-		   "bnx2x_ets_bw_limit: Total BW can't be zero\n");
+		DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
 		return;
 	}
 
@@ -355,7 +349,7 @@
 	u32 val	= 0;
 
 	DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
-	/**
+	/*
 	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
 	 * as strict.  Bits 0,1,2 - debug and management entries,
 	 * 3 - COS0 entry, 4 - COS1 entry.
@@ -364,7 +358,7 @@
 	 * MCP and debug are strict
 	 */
 	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
-	/**
+	/*
 	 * For strict priority entries defines the number of consecutive slots
 	 * for the highest priority.
 	 */
@@ -377,14 +371,14 @@
 	/* Defines the number of consecutive slots for the strict priority */
 	REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
 
-	/**
-	* mapping between entry  priority to client number (0,1,2 -debug and
-	* management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
-	* 3bits client num.
-	*   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
-	* dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000
-	* dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000
-	*/
+	/*
+	 * mapping between entry  priority to client number (0,1,2 -debug and
+	 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
+	 * 3bits client num.
+	 *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
+	 * dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000
+	 * dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000
+	 */
 	val = (0 == strict_cos) ? 0x2318 : 0x22E0;
 	REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
 
@@ -471,7 +465,7 @@
 /*			MAC/PBF section				  */
 /******************************************************************/
 static void bnx2x_emac_init(struct link_params *params,
-			   struct link_vars *vars)
+			    struct link_vars *vars)
 {
 	/* reset and unreset the emac core */
 	struct bnx2x *bp = params->bp;
@@ -481,10 +475,10 @@
 	u16 timeout;
 
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-		   (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+	       (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
 	udelay(5);
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
-		   (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+	       (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
 
 	/* init emac - use read-modify-write */
 	/* self clear reset */
@@ -515,7 +509,7 @@
 }
 
 static u8 bnx2x_emac_enable(struct link_params *params,
-			  struct link_vars *vars, u8 lb)
+			    struct link_vars *vars, u8 lb)
 {
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
@@ -527,55 +521,33 @@
 	/* enable emac and not bmac */
 	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
 
-	/* for paladium */
-	if (CHIP_REV_IS_EMUL(bp)) {
-		/* Use lane 1 (of lanes 0-3) */
-		REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
-		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
-			    port*4, 1);
-	}
-	/* for fpga */
-	else
-
-	if (CHIP_REV_IS_FPGA(bp)) {
-		/* Use lane 1 (of lanes 0-3) */
-		DP(NETIF_MSG_LINK, "bnx2x_emac_enable: Setting FPGA\n");
-
-		REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
-		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4,
-			    0);
-	} else
 	/* ASIC */
 	if (vars->phy_flags & PHY_XGXS_FLAG) {
 		u32 ser_lane = ((params->lane_config &
-			    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
-			    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+				 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+				PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
 
 		DP(NETIF_MSG_LINK, "XGXS\n");
 		/* select the master lanes (out of 0-3) */
-		REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 +
-			   port*4, ser_lane);
+		REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
 		/* select XGXS */
-		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
-			   port*4, 1);
+		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
 
 	} else { /* SerDes */
 		DP(NETIF_MSG_LINK, "SerDes\n");
 		/* select SerDes */
-		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL +
-			   port*4, 0);
+		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
 	}
 
 	bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
-		    EMAC_RX_MODE_RESET);
+		      EMAC_RX_MODE_RESET);
 	bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
-		    EMAC_TX_MODE_RESET);
+		      EMAC_TX_MODE_RESET);
 
 	if (CHIP_REV_IS_SLOW(bp)) {
 		/* config GMII mode */
 		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
-		EMAC_WR(bp, EMAC_REG_EMAC_MODE,
-			    (val | EMAC_MODE_PORT_GMII));
+		EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
 	} else { /* ASIC */
 		/* pause enable/disable */
 		bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
@@ -605,14 +577,14 @@
 	val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
 	val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
 
-	/**
-	* Setting this bit causes MAC control frames (except for pause
-	* frames) to be passed on for processing. This setting has no
-	* affect on the operation of the pause frames. This bit effects
-	* all packets regardless of RX Parser packet sorting logic.
-	* Turn the PFC off to make sure we are in Xon state before
-	* enabling it.
-	*/
+	/*
+	 * Setting this bit causes MAC control frames (except for pause
+	 * frames) to be passed on for processing. This setting has no
+	 * effect on the operation of the pause frames. This bit affects
+	 * all packets regardless of RX Parser packet sorting logic.
+	 * Turn the PFC off to make sure we are in Xon state before
+	 * enabling it.
+	 */
 	EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
 	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
 		DP(NETIF_MSG_LINK, "PFC is enabled\n");
@@ -666,16 +638,7 @@
 	REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
 	REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
 
-	if (CHIP_REV_IS_EMUL(bp)) {
-		/* take the BigMac out of reset */
-		REG_WR(bp,
-			   GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
-			   (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-
-		/* enable access for bmac registers */
-		REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
-	} else
-		REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
+	REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
 
 	vars->mac_type = MAC_TYPE_EMAC;
 	return 0;
@@ -731,8 +694,7 @@
 		val |= (1<<5);
 	wb_data[0] = val;
 	wb_data[1] = 0;
-	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL,
-			wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
 	udelay(30);
 
 	/* Tx control */
@@ -768,12 +730,12 @@
 
 	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
 
-	/**
-	* Set Time (based unit is 512 bit time) between automatic
-	* re-sending of PP packets amd enable automatic re-send of
-	* Per-Priroity Packet as long as pp_gen is asserted and
-	* pp_disable is low.
-	*/
+	/*
+	 * Set the time (base unit is 512 bit times) between automatic
+	 * re-sending of PP packets and enable automatic re-send of
+	 * Per-Priority Packets as long as pp_gen is asserted and
+	 * pp_disable is low.
+	 */
 	val = 0x8000;
 	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
 		val |= (1<<16); /* enable automatic re-send */
@@ -781,7 +743,7 @@
 	wb_data[0] = val;
 	wb_data[1] = 0;
 	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
-			wb_data, 2);
+		    wb_data, 2);
 
 	/* mac control */
 	val = 0x3; /* Enable RX and TX */
@@ -795,8 +757,7 @@
 
 	wb_data[0] = val;
 	wb_data[1] = 0;
-	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
-			wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
 }
 
 static void bnx2x_update_pfc_brb(struct link_params *params,
@@ -825,17 +786,25 @@
 			full_xon_th =
 			  PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
 		}
-	/* The number of free blocks below which the pause signal to class 0
-	   of MAC #n is asserted. n=0,1 */
+	/*
+	 * The number of free blocks below which the pause signal to class 0
+	 * of MAC #n is asserted. n=0,1
+	 */
 	REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
-	/* The number of free blocks above which the pause signal to class 0
-	   of MAC #n is de-asserted. n=0,1 */
+	/*
+	 * The number of free blocks above which the pause signal to class 0
+	 * of MAC #n is de-asserted. n=0,1
+	 */
 	REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
-	/* The number of free blocks below which the full signal to class 0
-	   of MAC #n is asserted. n=0,1 */
+	/*
+	 * The number of free blocks below which the full signal to class 0
+	 * of MAC #n is asserted. n=0,1
+	 */
 	REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
-	/* The number of free blocks above which the full signal to class 0
-	   of MAC #n is de-asserted. n=0,1 */
+	/*
+	 * The number of free blocks above which the full signal to class 0
+	 * of MAC #n is de-asserted. n=0,1
+	 */
 	REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);
 
 	if (set_pfc && pfc_params) {
@@ -859,25 +828,25 @@
 			full_xon_th =
 			  PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
 		}
-		/**
+		/*
 		 * The number of free blocks below which the pause signal to
 		 * class 1 of MAC #n is asserted. n=0,1
-		 **/
+		 */
 		REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
-		/**
+		/*
 		 * The number of free blocks above which the pause signal to
 		 * class 1 of MAC #n is de-asserted. n=0,1
-		 **/
+		 */
 		REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
-		/**
+		/*
 		 * The number of free blocks below which the full signal to
 		 * class 1 of MAC #n is asserted. n=0,1
-		 **/
+		 */
 		REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
-		/**
+		/*
 		 * The number of free blocks above which the full signal to
 		 * class 1 of MAC #n is de-asserted. n=0,1
-		 **/
+		 */
 		REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
 	}
 }
@@ -896,7 +865,7 @@
 		FEATURE_CONFIG_PFC_ENABLED;
 	DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
 
-	/**
+	/*
 	 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
 	 * MAC control frames (that are not pause packets)
 	 * will be forwarded to the XCM.
@@ -904,7 +873,7 @@
 	xcm_mask = REG_RD(bp,
 				port ? NIG_REG_LLH1_XCM_MASK :
 				NIG_REG_LLH0_XCM_MASK);
-	/**
+	/*
 	 * nig params will override non PFC params, since it's possible to
 	 * do transition from PFC to SAFC
 	 */
@@ -994,7 +963,7 @@
 		      struct link_vars *vars,
 		      struct bnx2x_nig_brb_pfc_port_params *pfc_params)
 {
-	/**
+	/*
 	 * The PFC and pause are orthogonal to one another, meaning when
 	 * PFC is enabled, the pause are disabled, and when PFC is
 	 * disabled, pause are set according to the pause result.
@@ -1035,7 +1004,7 @@
 
 static u8 bnx2x_bmac1_enable(struct link_params *params,
 			     struct link_vars *vars,
-			  u8 is_lb)
+			     u8 is_lb)
 {
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
@@ -1049,9 +1018,8 @@
 	/* XGXS control */
 	wb_data[0] = 0x3c;
 	wb_data[1] = 0;
-	REG_WR_DMAE(bp, bmac_addr +
-		      BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
-		      wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
+		    wb_data, 2);
 
 	/* tx MAC SA */
 	wb_data[0] = ((params->mac_addr[2] << 24) |
@@ -1060,8 +1028,7 @@
 			params->mac_addr[5]);
 	wb_data[1] = ((params->mac_addr[0] << 8) |
 			params->mac_addr[1]);
-	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
-		    wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
 
 	/* mac control */
 	val = 0x3;
@@ -1071,43 +1038,30 @@
 	}
 	wb_data[0] = val;
 	wb_data[1] = 0;
-	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
-		    wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
 
 	/* set rx mtu */
 	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
 	wb_data[1] = 0;
-	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE,
-			wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
 
 	bnx2x_update_pfc_bmac1(params, vars);
 
 	/* set tx mtu */
 	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
 	wb_data[1] = 0;
-	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE,
-			wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
 
 	/* set cnt max size */
 	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
 	wb_data[1] = 0;
-	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
-		    wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
 
 	/* configure safc */
 	wb_data[0] = 0x1000200;
 	wb_data[1] = 0;
 	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
 		    wb_data, 2);
-	/* fix for emulation */
-	if (CHIP_REV_IS_EMUL(bp)) {
-		wb_data[0] = 0xf000;
-		wb_data[1] = 0;
-		REG_WR_DMAE(bp,
-			    bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
-			    wb_data, 2);
-	}
-
 
 	return 0;
 }
@@ -1126,16 +1080,14 @@
 
 	wb_data[0] = 0;
 	wb_data[1] = 0;
-	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL,
-			wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
 	udelay(30);
 
 	/* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
 	wb_data[0] = 0x3c;
 	wb_data[1] = 0;
-	REG_WR_DMAE(bp, bmac_addr +
-			BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
-			wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
+		    wb_data, 2);
 
 	udelay(30);
 
@@ -1147,7 +1099,7 @@
 	wb_data[1] = ((params->mac_addr[0] << 8) |
 			params->mac_addr[1]);
 	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
-			wb_data, 2);
+		    wb_data, 2);
 
 	udelay(30);
 
@@ -1155,27 +1107,24 @@
 	wb_data[0] = 0x1000200;
 	wb_data[1] = 0;
 	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
-			wb_data, 2);
+		    wb_data, 2);
 	udelay(30);
 
 	/* set rx mtu */
 	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
 	wb_data[1] = 0;
-	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE,
-			wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
 	udelay(30);
 
 	/* set tx mtu */
 	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
 	wb_data[1] = 0;
-	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE,
-			wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
 	udelay(30);
 	/* set cnt max size */
 	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
 	wb_data[1] = 0;
-	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE,
-			wb_data, 2);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
 	udelay(30);
 	bnx2x_update_pfc_bmac2(params, vars, is_lb);
 
@@ -1191,11 +1140,11 @@
 	u32 val;
 	/* reset and unreset the BigMac */
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
-		     (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 	msleep(1);
 
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
-		     (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 
 	/* enable access for bmac registers */
 	REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
@@ -1230,15 +1179,14 @@
 	struct bnx2x *bp = params->bp;
 
 	REG_WR(bp, params->shmem_base +
-		   offsetof(struct shmem_region,
-			    port_mb[params->port].link_status),
-			link_status);
+	       offsetof(struct shmem_region,
+			port_mb[params->port].link_status), link_status);
 }
 
 static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
 {
 	u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
-		NIG_REG_INGRESS_BMAC0_MEM;
+			NIG_REG_INGRESS_BMAC0_MEM;
 	u32 wb_data[2];
 	u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
 
@@ -1250,12 +1198,12 @@
 		if (CHIP_IS_E2(bp)) {
 			/* Clear Rx Enable bit in BMAC_CONTROL register */
 			REG_RD_DMAE(bp, bmac_addr +
-					BIGMAC2_REGISTER_BMAC_CONTROL,
-					wb_data, 2);
+				    BIGMAC2_REGISTER_BMAC_CONTROL,
+				    wb_data, 2);
 			wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
 			REG_WR_DMAE(bp, bmac_addr +
-					BIGMAC2_REGISTER_BMAC_CONTROL,
-					wb_data, 2);
+				    BIGMAC2_REGISTER_BMAC_CONTROL,
+				    wb_data, 2);
 		} else {
 			/* Clear Rx Enable bit in BMAC_CONTROL register */
 			REG_RD_DMAE(bp, bmac_addr +
@@ -1271,7 +1219,7 @@
 }
 
 static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
-			 u32 line_speed)
+			   u32 line_speed)
 {
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
@@ -1308,7 +1256,7 @@
 		/* update threshold */
 		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
 		/* update init credit */
-		init_crd = 778; 	/* (800-18-4) */
+		init_crd = 778;		/* (800-18-4) */
 
 	} else {
 		u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
@@ -1353,6 +1301,23 @@
 	return 0;
 }
 
+/*
+ * get_emac_base
+ *
+ * @param bp
+ * @param mdc_mdio_access
+ * @param port
+ *
+ * @return u32
+ *
+ * This function selects the MDC/MDIO access (through emac0 or
+ * emac1) depending on the mdc_mdio_access, port and port-swap
+ * configuration. Each phy has a default access mode, which could
+ * also be overridden by nvram configuration. This parameter,
+ * whether it is the default phy configuration or the nvram
+ * override configuration, is passed here as mdc_mdio_access and
+ * selects the emac_base for the CL45 read/write operations
+ */
 static u32 bnx2x_get_emac_base(struct bnx2x *bp,
 			       u32 mdc_mdio_access, u8 port)
 {
@@ -1385,13 +1350,16 @@
 
 }
 
-u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
-		    u8 devad, u16 reg, u16 val)
+/******************************************************************/
+/*			CL45 access functions			  */
+/******************************************************************/
+static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+			   u8 devad, u16 reg, u16 val)
 {
 	u32 tmp, saved_mode;
 	u8 i, rc = 0;
-
-	/* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+	/*
+	 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
 	 * (a value of 49==0x31) and make sure that the AUTO poll is off
 	 */
 
@@ -1414,8 +1382,7 @@
 	for (i = 0; i < 50; i++) {
 		udelay(10);
 
-		tmp = REG_RD(bp, phy->mdio_ctrl +
-				   EMAC_REG_EMAC_MDIO_COMM);
+		tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
 		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
 			udelay(5);
 			break;
@@ -1423,6 +1390,7 @@
 	}
 	if (tmp & EMAC_MDIO_COMM_START_BUSY) {
 		DP(NETIF_MSG_LINK, "write phy register failed\n");
+		netdev_err(bp->dev, "MDC/MDIO access timeout\n");
 		rc = -EFAULT;
 	} else {
 		/* data */
@@ -1435,7 +1403,7 @@
 			udelay(10);
 
 			tmp = REG_RD(bp, phy->mdio_ctrl +
-					 EMAC_REG_EMAC_MDIO_COMM);
+				     EMAC_REG_EMAC_MDIO_COMM);
 			if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
 				udelay(5);
 				break;
@@ -1443,6 +1411,7 @@
 		}
 		if (tmp & EMAC_MDIO_COMM_START_BUSY) {
 			DP(NETIF_MSG_LINK, "write phy register failed\n");
+			netdev_err(bp->dev, "MDC/MDIO access timeout\n");
 			rc = -EFAULT;
 		}
 	}
@@ -1453,20 +1422,20 @@
 	return rc;
 }
 
-u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
-		   u8 devad, u16 reg, u16 *ret_val)
+static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+			  u8 devad, u16 reg, u16 *ret_val)
 {
 	u32 val, saved_mode;
 	u16 i;
 	u8 rc = 0;
-
-	/* set clause 45 mode, slow down the MDIO clock to 2.5MHz
+	/*
+	 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
 	 * (a value of 49==0x31) and make sure that the AUTO poll is off
 	 */
 
 	saved_mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
 	val = saved_mode & ~((EMAC_MDIO_MODE_AUTO_POLL |
-			     EMAC_MDIO_MODE_CLOCK_CNT));
+			      EMAC_MDIO_MODE_CLOCK_CNT));
 	val |= (EMAC_MDIO_MODE_CLAUSE_45 |
 		(49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
 	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
@@ -1490,7 +1459,7 @@
 	}
 	if (val & EMAC_MDIO_COMM_START_BUSY) {
 		DP(NETIF_MSG_LINK, "read phy register failed\n");
-
+		netdev_err(bp->dev, "MDC/MDIO access timeout\n");
 		*ret_val = 0;
 		rc = -EFAULT;
 
@@ -1505,7 +1474,7 @@
 			udelay(10);
 
 			val = REG_RD(bp, phy->mdio_ctrl +
-					  EMAC_REG_EMAC_MDIO_COMM);
+				     EMAC_REG_EMAC_MDIO_COMM);
 			if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
 				*ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
 				break;
@@ -1513,7 +1482,7 @@
 		}
 		if (val & EMAC_MDIO_COMM_START_BUSY) {
 			DP(NETIF_MSG_LINK, "read phy register failed\n");
-
+			netdev_err(bp->dev, "MDC/MDIO access timeout\n");
 			*ret_val = 0;
 			rc = -EFAULT;
 		}
@@ -1529,7 +1498,7 @@
 		  u8 devad, u16 reg, u16 *ret_val)
 {
 	u8 phy_index;
-	/**
+	/*
 	 * Probe for the phy according to the given phy_addr, and execute
 	 * the read request on it
 	 */
@@ -1547,7 +1516,7 @@
 		   u8 devad, u16 reg, u16 val)
 {
 	u8 phy_index;
-	/**
+	/*
 	 * Probe for the phy according to the given phy_addr, and execute
 	 * the write request on it
 	 */
@@ -1573,19 +1542,18 @@
 
 	offset = phy->addr + ser_lane;
 	if (CHIP_IS_E2(bp))
-		aer_val = 0x2800 + offset - 1;
+		aer_val = 0x3800 + offset - 1;
 	else
 		aer_val = 0x3800 + offset;
-	CL45_WR_OVER_CL22(bp, phy,
-				MDIO_REG_BANK_AER_BLOCK,
-				MDIO_AER_BLOCK_AER_REG, aer_val);
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, aer_val);
 }
 static void bnx2x_set_aer_mmd_serdes(struct bnx2x *bp,
 				     struct bnx2x_phy *phy)
 {
-	CL45_WR_OVER_CL22(bp, phy,
-				MDIO_REG_BANK_AER_BLOCK,
-				MDIO_AER_BLOCK_AER_REG, 0x3800);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, 0x3800);
 }
 
 /******************************************************************/
@@ -1621,9 +1589,8 @@
 
 	bnx2x_set_serdes_access(bp, port);
 
-	REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD +
-		     port*0x10,
-		     DEFAULT_PHY_DEV_ADDR);
+	REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
+	       DEFAULT_PHY_DEV_ADDR);
 }
 
 static void bnx2x_xgxs_deassert(struct link_params *params)
@@ -1641,23 +1608,22 @@
 	udelay(500);
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
 
-	REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST +
-		     port*0x18, 0);
+	REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
 	REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
-		     params->phy[INT_PHY].def_md_devad);
+	       params->phy[INT_PHY].def_md_devad);
 }
 
 
 void bnx2x_link_status_update(struct link_params *params,
-			    struct link_vars   *vars)
+			      struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	u8 link_10g;
 	u8 port = params->port;
 
 	vars->link_status = REG_RD(bp, params->shmem_base +
-					  offsetof(struct shmem_region,
-					   port_mb[port].link_status));
+				   offsetof(struct shmem_region,
+					    port_mb[port].link_status));
 
 	vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
 
@@ -1667,7 +1633,7 @@
 		vars->phy_link_up = 1;
 		vars->duplex = DUPLEX_FULL;
 		switch (vars->link_status &
-					LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+			LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
 			case LINK_10THD:
 				vars->duplex = DUPLEX_HALF;
 				/* fall thru */
@@ -1779,20 +1745,20 @@
 {
 	struct bnx2x *bp = params->bp;
 	u16 new_master_ln, ser_lane;
-	ser_lane =  ((params->lane_config &
+	ser_lane = ((params->lane_config &
 		     PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
-		     PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+		    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
 
 	/* set the master_ln for AN */
-	CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_XGXS_BLOCK2,
-			      MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
-			      &new_master_ln);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_XGXS_BLOCK2,
+			  MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+			  &new_master_ln);
 
-	CL45_WR_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_XGXS_BLOCK2 ,
-			      MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
-			      (new_master_ln | ser_lane));
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_XGXS_BLOCK2,
+			  MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+			  (new_master_ln | ser_lane));
 }
 
 static u8 bnx2x_reset_unicore(struct link_params *params,
@@ -1802,17 +1768,16 @@
 	struct bnx2x *bp = params->bp;
 	u16 mii_control;
 	u16 i;
-
-	CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_COMBO_IEEE0,
-			      MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
 
 	/* reset the unicore */
-	CL45_WR_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_COMBO_IEEE0,
-			      MDIO_COMBO_IEEE0_MII_CONTROL,
-			      (mii_control |
-			       MDIO_COMBO_IEEO_MII_CONTROL_RESET));
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL,
+			  (mii_control |
+			   MDIO_COMBO_IEEO_MII_CONTROL_RESET));
 	if (set_serdes)
 		bnx2x_set_serdes_access(bp, params->port);
 
@@ -1821,10 +1786,10 @@
 		udelay(5);
 
 		/* the reset erased the previous bank value */
-		CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_COMBO_IEEE0,
-			      MDIO_COMBO_IEEE0_MII_CONTROL,
-			      &mii_control);
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_COMBO_IEEE0,
+				  MDIO_COMBO_IEEE0_MII_CONTROL,
+				  &mii_control);
 
 		if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
 			udelay(5);
@@ -1832,6 +1797,9 @@
 		}
 	}
 
+	netdev_err(bp->dev, "Warning: PHY was not initialized,"
+		   " Port %d\n",
+		   params->port);
 	DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
 	return -EINVAL;
 
@@ -1841,43 +1809,45 @@
 				 struct bnx2x_phy *phy)
 {
 	struct bnx2x *bp = params->bp;
-	/* Each two bits represents a lane number:
-	   No swap is 0123 => 0x1b no need to enable the swap */
+	/*
+	 * Each two bits represents a lane number:
+	 * No swap is 0123 => 0x1b, so no need to enable the swap
+	 */
 	u16 ser_lane, rx_lane_swap, tx_lane_swap;
 
 	ser_lane = ((params->lane_config &
-			 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
-			PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+		     PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+		    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
 	rx_lane_swap = ((params->lane_config &
-			     PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
-			    PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
+			 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
+			PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
 	tx_lane_swap = ((params->lane_config &
-			     PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
-			    PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
+			 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
+			PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
 
 	if (rx_lane_swap != 0x1b) {
-		CL45_WR_OVER_CL22(bp, phy,
-				    MDIO_REG_BANK_XGXS_BLOCK2,
-				    MDIO_XGXS_BLOCK2_RX_LN_SWAP,
-				    (rx_lane_swap |
-				    MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
-				    MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_XGXS_BLOCK2,
+				  MDIO_XGXS_BLOCK2_RX_LN_SWAP,
+				  (rx_lane_swap |
+				   MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
+				   MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
 	} else {
-		CL45_WR_OVER_CL22(bp, phy,
-				      MDIO_REG_BANK_XGXS_BLOCK2,
-				      MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_XGXS_BLOCK2,
+				  MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
 	}
 
 	if (tx_lane_swap != 0x1b) {
-		CL45_WR_OVER_CL22(bp, phy,
-				      MDIO_REG_BANK_XGXS_BLOCK2,
-				      MDIO_XGXS_BLOCK2_TX_LN_SWAP,
-				      (tx_lane_swap |
-				       MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_XGXS_BLOCK2,
+				  MDIO_XGXS_BLOCK2_TX_LN_SWAP,
+				  (tx_lane_swap |
+				   MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
 	} else {
-		CL45_WR_OVER_CL22(bp, phy,
-				      MDIO_REG_BANK_XGXS_BLOCK2,
-				      MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_XGXS_BLOCK2,
+				  MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
 	}
 }
 
@@ -1886,66 +1856,66 @@
 {
 	struct bnx2x *bp = params->bp;
 	u16 control2;
-	CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_SERDES_DIGITAL,
-			      MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
-			      &control2);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+			  &control2);
 	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
 		control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
 	else
 		control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
 	DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
 		phy->speed_cap_mask, control2);
-	CL45_WR_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_SERDES_DIGITAL,
-			      MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
-			      control2);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+			  control2);
 
 	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
 	     (phy->speed_cap_mask &
 		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
 		DP(NETIF_MSG_LINK, "XGXS\n");
 
-		CL45_WR_OVER_CL22(bp, phy,
-				MDIO_REG_BANK_10G_PARALLEL_DETECT,
-				MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
-				MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
+		CL22_WR_OVER_CL45(bp, phy,
+				 MDIO_REG_BANK_10G_PARALLEL_DETECT,
+				 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+				 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
 
-		CL45_RD_OVER_CL22(bp, phy,
-				MDIO_REG_BANK_10G_PARALLEL_DETECT,
-				MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
-				&control2);
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_10G_PARALLEL_DETECT,
+				  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+				  &control2);
 
 
 		control2 |=
 		    MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
 
-		CL45_WR_OVER_CL22(bp, phy,
-				MDIO_REG_BANK_10G_PARALLEL_DETECT,
-				MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
-				control2);
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_10G_PARALLEL_DETECT,
+				  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+				  control2);
 
 		/* Disable parallel detection of HiG */
-		CL45_WR_OVER_CL22(bp, phy,
-				MDIO_REG_BANK_XGXS_BLOCK2,
-				MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
-				MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
-				MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_XGXS_BLOCK2,
+				  MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
+				  MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
+				  MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
 	}
 }
 
 static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
 			      struct link_params *params,
-			    struct link_vars *vars,
-			    u8 enable_cl73)
+			      struct link_vars *vars,
+			      u8 enable_cl73)
 {
 	struct bnx2x *bp = params->bp;
 	u16 reg_val;
 
 	/* CL37 Autoneg */
-	CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_COMBO_IEEE0,
-			      MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
 
 	/* CL37 Autoneg Enabled */
 	if (vars->line_speed == SPEED_AUTO_NEG)
@@ -1954,15 +1924,15 @@
 		reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
 			     MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
 
-	CL45_WR_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_COMBO_IEEE0,
-			      MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
 
 	/* Enable/Disable Autodetection */
 
-	CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_SERDES_DIGITAL,
-			      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
 	reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
 		    MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
 	reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
@@ -1971,14 +1941,14 @@
 	else
 		reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
 
-	CL45_WR_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_SERDES_DIGITAL,
-			      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
 
 	/* Enable TetonII and BAM autoneg */
-	CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_BAM_NEXT_PAGE,
-			      MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_BAM_NEXT_PAGE,
+			  MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
 			  &reg_val);
 	if (vars->line_speed == SPEED_AUTO_NEG) {
 		/* Enable BAM aneg Mode and TetonII aneg Mode */
@@ -1989,20 +1959,20 @@
 		reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
 			     MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
 	}
-	CL45_WR_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_BAM_NEXT_PAGE,
-			      MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
-			      reg_val);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_BAM_NEXT_PAGE,
+			  MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+			  reg_val);
 
 	if (enable_cl73) {
 		/* Enable Cl73 FSM status bits */
-		CL45_WR_OVER_CL22(bp, phy,
-				      MDIO_REG_BANK_CL73_USERB0,
-				    MDIO_CL73_USERB0_CL73_UCTRL,
-				      0xe);
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_USERB0,
+				  MDIO_CL73_USERB0_CL73_UCTRL,
+				  0xe);
 
 		/* Enable BAM Station Manager*/
-		CL45_WR_OVER_CL22(bp, phy,
+		CL22_WR_OVER_CL45(bp, phy,
 			MDIO_REG_BANK_CL73_USERB0,
 			MDIO_CL73_USERB0_CL73_BAM_CTRL1,
 			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
@@ -2010,10 +1980,10 @@
 			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
 
 		/* Advertise CL73 link speeds */
-		CL45_RD_OVER_CL22(bp, phy,
-					      MDIO_REG_BANK_CL73_IEEEB1,
-					      MDIO_CL73_IEEEB1_AN_ADV2,
-					      &reg_val);
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB1,
+				  MDIO_CL73_IEEEB1_AN_ADV2,
+				  &reg_val);
 		if (phy->speed_cap_mask &
 		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
 			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
@@ -2021,10 +1991,10 @@
 		    PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
 			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
 
-		CL45_WR_OVER_CL22(bp, phy,
-					      MDIO_REG_BANK_CL73_IEEEB1,
-					      MDIO_CL73_IEEEB1_AN_ADV2,
-				      reg_val);
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB1,
+				  MDIO_CL73_IEEEB1_AN_ADV2,
+				  reg_val);
 
 		/* CL73 Autoneg Enabled */
 		reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
@@ -2032,37 +2002,39 @@
 	} else /* CL73 Autoneg Disabled */
 		reg_val = 0;
 
-	CL45_WR_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_CL73_IEEEB0,
-			      MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_CL73_IEEEB0,
+			  MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
 }
 
 /* program SerDes, forced speed */
 static void bnx2x_program_serdes(struct bnx2x_phy *phy,
 				 struct link_params *params,
-			       struct link_vars *vars)
+				 struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	u16 reg_val;
 
 	/* program duplex, disable autoneg and sgmii*/
-	CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_COMBO_IEEE0,
-			      MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
 	reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
 		     MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
 		     MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
 	if (phy->req_duplex == DUPLEX_FULL)
 		reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
-	CL45_WR_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_COMBO_IEEE0,
-			      MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
 
-	/* program speed
-	   - needed only if the speed is greater than 1G (2.5G or 10G) */
-	CL45_RD_OVER_CL22(bp, phy,
-				      MDIO_REG_BANK_SERDES_DIGITAL,
-				      MDIO_SERDES_DIGITAL_MISC1, &reg_val);
+	/*
+	 * program speed
+	 *  - needed only if the speed is greater than 1G (2.5G or 10G)
+	 */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_MISC1, &reg_val);
 	/* clearing the speed value before setting the right speed */
 	DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
 
@@ -2083,9 +2055,9 @@
 				MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G;
 	}
 
-	CL45_WR_OVER_CL22(bp, phy,
-				      MDIO_REG_BANK_SERDES_DIGITAL,
-				      MDIO_SERDES_DIGITAL_MISC1, reg_val);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_MISC1, reg_val);
 
 }
 
@@ -2102,13 +2074,13 @@
 		val |= MDIO_OVER_1G_UP1_2_5G;
 	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
 		val |= MDIO_OVER_1G_UP1_10G;
-	CL45_WR_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_OVER_1G,
-			      MDIO_OVER_1G_UP1, val);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_OVER_1G,
+			  MDIO_OVER_1G_UP1, val);
 
-	CL45_WR_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_OVER_1G,
-			      MDIO_OVER_1G_UP3, 0x400);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_OVER_1G,
+			  MDIO_OVER_1G_UP3, 0x400);
 }
 
 static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
@@ -2116,22 +2088,21 @@
 {
 	struct bnx2x *bp = params->bp;
 	*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
-	/* resolve pause mode and advertisement
-	 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
+	/*
+	 * Resolve pause mode and advertisement.
+	 * Please refer to Table 28B-3 of the 802.3ab-1999 spec
+	 */
 
 	switch (phy->req_flow_ctrl) {
 	case BNX2X_FLOW_CTRL_AUTO:
-		if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) {
+		if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
+			*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+		else
 			*ieee_fc |=
-			     MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
-		} else {
-			*ieee_fc |=
-		       MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
-		}
+			MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
 		break;
 	case BNX2X_FLOW_CTRL_TX:
-		*ieee_fc |=
-		       MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
 		break;
 
 	case BNX2X_FLOW_CTRL_RX:
@@ -2149,23 +2120,23 @@
 
 static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x_phy *phy,
 					     struct link_params *params,
-					   u16 ieee_fc)
+					     u16 ieee_fc)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val;
 	/* for AN, we are always publishing full duplex */
 
-	CL45_WR_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_COMBO_IEEE0,
-			      MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
-	CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_CL73_IEEEB1,
-			      MDIO_CL73_IEEEB1_AN_ADV1, &val);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_CL73_IEEEB1,
+			  MDIO_CL73_IEEEB1_AN_ADV1, &val);
 	val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
 	val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
-	CL45_WR_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_CL73_IEEEB1,
-			      MDIO_CL73_IEEEB1_AN_ADV1, val);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_CL73_IEEEB1,
+			  MDIO_CL73_IEEEB1_AN_ADV1, val);
 }
 
 static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
@@ -2179,67 +2150,67 @@
 	/* Enable and restart BAM/CL37 aneg */
 
 	if (enable_cl73) {
-		CL45_RD_OVER_CL22(bp, phy,
-				      MDIO_REG_BANK_CL73_IEEEB0,
-				      MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
-				      &mii_control);
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB0,
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+				  &mii_control);
 
-		CL45_WR_OVER_CL22(bp, phy,
-				MDIO_REG_BANK_CL73_IEEEB0,
-				MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
-				(mii_control |
-				MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
-				MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB0,
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+				  (mii_control |
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
 	} else {
 
-		CL45_RD_OVER_CL22(bp, phy,
-				      MDIO_REG_BANK_COMBO_IEEE0,
-				      MDIO_COMBO_IEEE0_MII_CONTROL,
-				      &mii_control);
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_COMBO_IEEE0,
+				  MDIO_COMBO_IEEE0_MII_CONTROL,
+				  &mii_control);
 		DP(NETIF_MSG_LINK,
 			 "bnx2x_restart_autoneg mii_control before = 0x%x\n",
 			 mii_control);
-		CL45_WR_OVER_CL22(bp, phy,
-				      MDIO_REG_BANK_COMBO_IEEE0,
-				      MDIO_COMBO_IEEE0_MII_CONTROL,
-				      (mii_control |
-				       MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
-				       MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_COMBO_IEEE0,
+				  MDIO_COMBO_IEEE0_MII_CONTROL,
+				  (mii_control |
+				   MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+				   MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
 	}
 }
 
 static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
 					   struct link_params *params,
-					 struct link_vars *vars)
+					   struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
 	u16 control1;
 
 	/* in SGMII mode, the unicore is always slave */
 
-	CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_SERDES_DIGITAL,
-			      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
-		      &control1);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+			  &control1);
 	control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
 	/* set sgmii mode (and not fiber) */
 	control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
 		      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
 		      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
-	CL45_WR_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_SERDES_DIGITAL,
-			      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
-			      control1);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+			  control1);
 
 	/* if forced speed */
 	if (!(vars->line_speed == SPEED_AUTO_NEG)) {
 		/* set speed, disable autoneg */
 		u16 mii_control;
 
-		CL45_RD_OVER_CL22(bp, phy,
-				      MDIO_REG_BANK_COMBO_IEEE0,
-				      MDIO_COMBO_IEEE0_MII_CONTROL,
-				      &mii_control);
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_COMBO_IEEE0,
+				  MDIO_COMBO_IEEE0_MII_CONTROL,
+				  &mii_control);
 		mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
 				 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK|
 				 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
@@ -2267,10 +2238,10 @@
 		if (phy->req_duplex == DUPLEX_FULL)
 			mii_control |=
 				MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
-		CL45_WR_OVER_CL22(bp, phy,
-				      MDIO_REG_BANK_COMBO_IEEE0,
-				      MDIO_COMBO_IEEE0_MII_CONTROL,
-				      mii_control);
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_COMBO_IEEE0,
+				  MDIO_COMBO_IEEE0_MII_CONTROL,
+				  mii_control);
 
 	} else { /* AN mode */
 		/* enable and restart AN */
@@ -2285,19 +2256,19 @@
 
 static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
 {						/*  LD	    LP	 */
-	switch (pause_result) { 		/* ASYM P ASYM P */
-	case 0xb:       			/*   1  0   1  1 */
+	switch (pause_result) {			/* ASYM P ASYM P */
+	case 0xb:				/*   1  0   1  1 */
 		vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
 		break;
 
-	case 0xe:       			/*   1  1   1  0 */
+	case 0xe:				/*   1  1   1  0 */
 		vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
 		break;
 
-	case 0x5:       			/*   0  1   0  1 */
-	case 0x7:       			/*   0  1   1  1 */
-	case 0xd:       			/*   1  1   0  1 */
-	case 0xf:       			/*   1  1   1  1 */
+	case 0x5:				/*   0  1   0  1 */
+	case 0x7:				/*   0  1   1  1 */
+	case 0xd:				/*   1  1   0  1 */
+	case 0xf:				/*   1  1   1  1 */
 		vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
 		break;
 
@@ -2317,24 +2288,24 @@
 	u16 pd_10g, status2_1000x;
 	if (phy->req_line_speed != SPEED_AUTO_NEG)
 		return 0;
-	CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_SERDES_DIGITAL,
-			      MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
-			      &status2_1000x);
-	CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_SERDES_DIGITAL,
-			      MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
-			      &status2_1000x);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+			  &status2_1000x);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+			  &status2_1000x);
 	if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
 		DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
 			 params->port);
 		return 1;
 	}
 
-	CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_10G_PARALLEL_DETECT,
-			      MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
-			      &pd_10g);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_10G_PARALLEL_DETECT,
+			  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
+			  &pd_10g);
 
 	if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
 		DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
@@ -2373,14 +2344,14 @@
 		    (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
 		     MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
 
-			CL45_RD_OVER_CL22(bp, phy,
-					      MDIO_REG_BANK_CL73_IEEEB1,
-					      MDIO_CL73_IEEEB1_AN_ADV1,
-					      &ld_pause);
-			CL45_RD_OVER_CL22(bp, phy,
-					     MDIO_REG_BANK_CL73_IEEEB1,
-					     MDIO_CL73_IEEEB1_AN_LP_ADV1,
-					     &lp_pause);
+			CL22_RD_OVER_CL45(bp, phy,
+					  MDIO_REG_BANK_CL73_IEEEB1,
+					  MDIO_CL73_IEEEB1_AN_ADV1,
+					  &ld_pause);
+			CL22_RD_OVER_CL45(bp, phy,
+					  MDIO_REG_BANK_CL73_IEEEB1,
+					  MDIO_CL73_IEEEB1_AN_LP_ADV1,
+					  &lp_pause);
 			pause_result = (ld_pause &
 					MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
 					>> 8;
@@ -2390,18 +2361,18 @@
 			DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
 				 pause_result);
 		} else {
-			CL45_RD_OVER_CL22(bp, phy,
-					      MDIO_REG_BANK_COMBO_IEEE0,
-					      MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
-					      &ld_pause);
-			CL45_RD_OVER_CL22(bp, phy,
-			       MDIO_REG_BANK_COMBO_IEEE0,
-			       MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
-			       &lp_pause);
+			CL22_RD_OVER_CL45(bp, phy,
+					  MDIO_REG_BANK_COMBO_IEEE0,
+					  MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+					  &ld_pause);
+			CL22_RD_OVER_CL45(bp, phy,
+				MDIO_REG_BANK_COMBO_IEEE0,
+				MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+				&lp_pause);
 			pause_result = (ld_pause &
 				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
 			pause_result |= (lp_pause &
-				 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
+				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
 			DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
 				 pause_result);
 		}
@@ -2417,25 +2388,25 @@
 	u16 rx_status, ustat_val, cl37_fsm_recieved;
 	DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
 	/* Step 1: Make sure signal is detected */
-	CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_RX0,
-			      MDIO_RX0_RX_STATUS,
-			      &rx_status);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_RX0,
+			  MDIO_RX0_RX_STATUS,
+			  &rx_status);
 	if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
 	    (MDIO_RX0_RX_STATUS_SIGDET)) {
 		DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
 			     "rx_status(0x80b0) = 0x%x\n", rx_status);
-		CL45_WR_OVER_CL22(bp, phy,
-				      MDIO_REG_BANK_CL73_IEEEB0,
-				      MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
-				      MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB0,
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
 		return;
 	}
 	/* Step 2: Check CL73 state machine */
-	CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_CL73_USERB0,
-			      MDIO_CL73_USERB0_CL73_USTAT1,
-			      &ustat_val);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_CL73_USERB0,
+			  MDIO_CL73_USERB0_CL73_USTAT1,
+			  &ustat_val);
 	if ((ustat_val &
 	     (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
 	      MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
@@ -2445,12 +2416,14 @@
 			     "ustat_val(0x8371) = 0x%x\n", ustat_val);
 		return;
 	}
-	/* Step 3: Check CL37 Message Pages received to indicate LP
-	supports only CL37 */
-	CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_REMOTE_PHY,
-			      MDIO_REMOTE_PHY_MISC_RX_STATUS,
-			      &cl37_fsm_recieved);
+	/*
+	 * Step 3: Check CL37 Message Pages received to indicate LP
+	 * supports only CL37
+	 */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_REMOTE_PHY,
+			  MDIO_REMOTE_PHY_MISC_RX_STATUS,
+			  &cl37_fsm_recieved);
 	if ((cl37_fsm_recieved &
 	     (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
 	     MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
@@ -2461,14 +2434,18 @@
 			 cl37_fsm_recieved);
 		return;
 	}
-	/* The combined cl37/cl73 fsm state information indicating that we are
-	connected to a device which does not support cl73, but does support
-	cl37 BAM. In this case we disable cl73 and restart cl37 auto-neg */
+	/*
+	 * The combined cl37/cl73 fsm state information indicates that
+	 * we are connected to a device which does not support cl73, but
+	 * does support cl37 BAM. In this case we disable cl73 and
+	 * restart cl37 auto-neg
+	 */
+
 	/* Disable CL73 */
-	CL45_WR_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_CL73_IEEEB0,
-			      MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
-			      0);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_CL73_IEEEB0,
+			  MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+			  0);
 	/* Restart CL37 autoneg */
 	bnx2x_restart_autoneg(phy, params, 0);
 	DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
@@ -2493,14 +2470,14 @@
 				     struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u16 new_line_speed , gp_status;
+	u16 new_line_speed, gp_status;
 	u8 rc = 0;
 
 	/* Read gp_status */
-	CL45_RD_OVER_CL22(bp, phy,
-				MDIO_REG_BANK_GP_STATUS,
-				MDIO_GP_STATUS_TOP_AN_STATUS1,
-				&gp_status);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_GP_STATUS,
+			  MDIO_GP_STATUS_TOP_AN_STATUS1,
+			  &gp_status);
 
 	if (phy->req_line_speed == SPEED_AUTO_NEG)
 		vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
@@ -2637,9 +2614,9 @@
 	u16 bank;
 
 	/* read precomp */
-	CL45_RD_OVER_CL22(bp, phy,
-			      MDIO_REG_BANK_OVER_1G,
-			      MDIO_OVER_1G_LP_UP2, &lp_up2);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_OVER_1G,
+			  MDIO_OVER_1G_LP_UP2, &lp_up2);
 
 	/* bits [10:7] at lp_up2, positioned at [15:12] */
 	lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
@@ -2651,18 +2628,18 @@
 
 	for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
 	      bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
-		CL45_RD_OVER_CL22(bp, phy,
-				      bank,
-				      MDIO_TX0_TX_DRIVER, &tx_driver);
+		CL22_RD_OVER_CL45(bp, phy,
+				  bank,
+				  MDIO_TX0_TX_DRIVER, &tx_driver);
 
 		/* replace tx_driver bits [15:12] */
 		if (lp_up2 !=
 		    (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
 			tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
 			tx_driver |= lp_up2;
-			CL45_WR_OVER_CL22(bp, phy,
-					      bank,
-					      MDIO_TX0_TX_DRIVER, tx_driver);
+			CL22_WR_OVER_CL45(bp, phy,
+					  bank,
+					  MDIO_TX0_TX_DRIVER, tx_driver);
 		}
 	}
 }
@@ -2676,10 +2653,10 @@
 
 	DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
 	bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
-		     EMAC_REG_EMAC_MODE,
-		     (EMAC_MODE_25G_MODE |
-		     EMAC_MODE_PORT_MII_10M |
-		     EMAC_MODE_HALF_DUPLEX));
+		       EMAC_REG_EMAC_MODE,
+		       (EMAC_MODE_25G_MODE |
+			EMAC_MODE_PORT_MII_10M |
+			EMAC_MODE_HALF_DUPLEX));
 	switch (vars->line_speed) {
 	case SPEED_10:
 		mode |= EMAC_MODE_PORT_MII_10M;
@@ -2707,8 +2684,8 @@
 	if (vars->duplex == DUPLEX_HALF)
 		mode |= EMAC_MODE_HALF_DUPLEX;
 	bnx2x_bits_en(bp,
-		    GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
-		    mode);
+		      GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
+		      mode);
 
 	bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
 	return 0;
@@ -2723,7 +2700,7 @@
 
 	for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
 	      bank += (MDIO_REG_BANK_RX1-MDIO_REG_BANK_RX0), i++) {
-			CL45_WR_OVER_CL22(bp, phy,
+			CL22_WR_OVER_CL45(bp, phy,
 					  bank,
 					  MDIO_RX0_RX_EQ_BOOST,
 					  phy->rx_preemphasis[i]);
@@ -2731,7 +2708,7 @@
 
 	for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
 		      bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
-			CL45_WR_OVER_CL22(bp, phy,
+			CL22_WR_OVER_CL45(bp, phy,
 					  bank,
 					  MDIO_TX0_TX_DRIVER,
 					  phy->tx_preemphasis[i]);
@@ -2754,7 +2731,7 @@
 		/* forced speed requested? */
 		if (vars->line_speed != SPEED_AUTO_NEG ||
 		    (SINGLE_MEDIA_DIRECT(params) &&
-			  params->loopback_mode == LOOPBACK_EXT)) {
+		     params->loopback_mode == LOOPBACK_EXT)) {
 			DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
 
 			/* disable autoneg */
@@ -2771,7 +2748,7 @@
 
 			/* program duplex & pause advertisement (for aneg) */
 			bnx2x_set_ieee_aneg_advertisment(phy, params,
-						       vars->ieee_fc);
+							 vars->ieee_fc);
 
 			/* enable autoneg */
 			bnx2x_set_autoneg(phy, params, vars, enable_cl73);
@@ -2842,7 +2819,8 @@
 }
 
 static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
-				     struct bnx2x_phy *phy)
+				     struct bnx2x_phy *phy,
+				     struct link_params *params)
 {
 	u16 cnt, ctrl;
 	/* Wait for soft reset to get cleared upto 1 sec */
@@ -2853,6 +2831,11 @@
 			break;
 		msleep(1);
 	}
+
+	if (cnt == 1000)
+		netdev_err(bp->dev, "Warning: PHY was not initialized,"
+			   " Port %d\n",
+			   params->port);
 	DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
 	return cnt;
 }
@@ -2863,9 +2846,7 @@
 	u32 mask;
 	struct bnx2x *bp = params->bp;
 
-	/* setting the status to report on link up
-	   for either XGXS or SerDes */
-
+	/* Setting the status to report on link up for either XGXS or SerDes */
 	if (params->switch_cfg == SWITCH_CFG_10G) {
 		mask = (NIG_MASK_XGXS0_LINK10G |
 			NIG_MASK_XGXS0_LINK_STATUS);
@@ -2908,7 +2889,7 @@
 {
 	u32 latch_status = 0;
 
-	/**
+	/*
 	 * Disable the MI INT ( external phy int ) by writing 1 to the
 	 * status register. Link down indication is high-active-signal,
 	 * so in this case we need to write the status to clear the XOR
@@ -2933,27 +2914,30 @@
 
 		/* For all latched-signal=up : Re-Arm Latch signals */
 		REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
-			     (latch_status & 0xfffe) | (latch_status & 1));
+		       (latch_status & 0xfffe) | (latch_status & 1));
 	}
 	/* For all latched-signal=up,Write original_signal to status */
 }
 
 static void bnx2x_link_int_ack(struct link_params *params,
-			     struct link_vars *vars, u8 is_10g)
+			       struct link_vars *vars, u8 is_10g)
 {
 	struct bnx2x *bp = params->bp;
 	u8 port = params->port;
 
-	/* first reset all status
-	 * we assume only one line will be change at a time */
+	/*
+	 * First reset all status; we assume only one line will be
+	 * changed at a time
+	 */
 	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
-		     (NIG_STATUS_XGXS0_LINK10G |
-		      NIG_STATUS_XGXS0_LINK_STATUS |
-		      NIG_STATUS_SERDES0_LINK_STATUS));
+		       (NIG_STATUS_XGXS0_LINK10G |
+			NIG_STATUS_XGXS0_LINK_STATUS |
+			NIG_STATUS_SERDES0_LINK_STATUS));
 	if (vars->phy_link_up) {
 		if (is_10g) {
-			/* Disable the 10G link interrupt
-			 * by writing 1 to the status register
+			/*
+			 * Disable the 10G link interrupt by writing 1 to the
+			 * status register
 			 */
 			DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
 			bnx2x_bits_en(bp,
@@ -2961,9 +2945,9 @@
 				      NIG_STATUS_XGXS0_LINK10G);
 
 		} else if (params->switch_cfg == SWITCH_CFG_10G) {
-			/* Disable the link interrupt
-			 * by writing 1 to the relevant lane
-			 * in the status register
+			/*
+			 * Disable the link interrupt by writing 1 to the
+			 * relevant lane in the status register
 			 */
 			u32 ser_lane = ((params->lane_config &
 				    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
@@ -2978,8 +2962,9 @@
 
 		} else { /* SerDes */
 			DP(NETIF_MSG_LINK, "SerDes phy link up\n");
-			/* Disable the link interrupt
-			 * by writing 1 to the status register
+			/*
+			 * Disable the link interrupt by writing 1 to the status
+			 * register
 			 */
 			bnx2x_bits_en(bp,
 				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -3059,8 +3044,7 @@
 	}
 	if ((params->num_phys == MAX_PHYS) &&
 	    (params->phy[EXT_PHY2].ver_addr != 0)) {
-		spirom_ver = REG_RD(bp,
-					  params->phy[EXT_PHY2].ver_addr);
+		spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
 		if (params->phy[EXT_PHY2].format_fw_ver) {
 			*ver_p = '/';
 			ver_p++;
@@ -3089,29 +3073,27 @@
 
 		/* change the uni_phy_addr in the nig */
 		md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
-					  port*0x18));
+				       port*0x18));
 
 		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
 
 		bnx2x_cl45_write(bp, phy,
-			       5,
-			       (MDIO_REG_BANK_AER_BLOCK +
-				(MDIO_AER_BLOCK_AER_REG & 0xf)),
-			       0x2800);
+				 5,
+				 (MDIO_REG_BANK_AER_BLOCK +
+				  (MDIO_AER_BLOCK_AER_REG & 0xf)),
+				 0x2800);
 
 		bnx2x_cl45_write(bp, phy,
-			       5,
-			       (MDIO_REG_BANK_CL73_IEEEB0 +
-				(MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
-			       0x6041);
+				 5,
+				 (MDIO_REG_BANK_CL73_IEEEB0 +
+				  (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
+				 0x6041);
 		msleep(200);
 		/* set aer mmd back */
 		bnx2x_set_aer_mmd_xgxs(params, phy);
 
 		/* and md_devad */
-		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
-			    md_devad);
-
+		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
 	} else {
 		u16 mii_ctrl;
 		DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
@@ -3152,56 +3134,71 @@
 	case LED_MODE_OFF:
 		REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
 		REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
-			   SHARED_HW_CFG_LED_MAC1);
+		       SHARED_HW_CFG_LED_MAC1);
 
 		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
 		EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
 		break;
 
 	case LED_MODE_OPER:
-		/**
+		/*
 		 * For all other phys, OPER mode is same as ON, so in case
 		 * link is down, do nothing
-		 **/
+		 */
 		if (!vars->link_up)
 			break;
 	case LED_MODE_ON:
-		if (SINGLE_MEDIA_DIRECT(params)) {
-			/**
-			* This is a work-around for HW issue found when link
-			* is up in CL73
-			*/
+		if (params->phy[EXT_PHY1].type ==
+		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
+		    CHIP_IS_E2(bp) && params->num_phys == 2) {
+			/*
+			 * This is a work-around for E2+8727 Configurations
+			 */
+			if (mode == LED_MODE_ON ||
+			    speed == SPEED_10000) {
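+				/*
+				 * Same scheme as the CL73 work-around below:
+				 * force the 10G LED on and assert the EMAC
+				 * LED override.
+				 */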
+				REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+				REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
+				tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+				EMAC_WR(bp, EMAC_REG_EMAC_LED,
+					(tmp | EMAC_LED_OVERRIDE));
+				return rc;
+			}
+		} else if (SINGLE_MEDIA_DIRECT(params)) {
+			/*
+			 * This is a work-around for HW issue found when link
+			 * is up in CL73
+			 */
 			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
 			REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
 		} else {
-			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
-				   hw_led_mode);
+			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
 		}
 
-		REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
-			   port*4, 0);
+		REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
 		/* Set blinking rate to ~15.9Hz */
 		REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
-			   LED_BLINK_RATE_VAL);
+		       LED_BLINK_RATE_VAL);
 		REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
-			   port*4, 1);
+		       port*4, 1);
 		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
-		EMAC_WR(bp, EMAC_REG_EMAC_LED,
-			    (tmp & (~EMAC_LED_OVERRIDE)));
+		EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
 
 		if (CHIP_IS_E1(bp) &&
 		    ((speed == SPEED_2500) ||
 		     (speed == SPEED_1000) ||
 		     (speed == SPEED_100) ||
 		     (speed == SPEED_10))) {
-			/* On Everest 1 Ax chip versions for speeds less than
-			10G LED scheme is different */
+			/*
+			 * On Everest 1 Ax chip versions for speeds less than
+			 * 10G LED scheme is different
+			 */
 			REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
-				   + port*4, 1);
+			       + port*4, 1);
 			REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
-				   port*4, 0);
+			       port*4, 0);
 			REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
-				   port*4, 1);
+			       port*4, 1);
 		}
 		break;
 
@@ -3215,7 +3212,7 @@
 
 }
 
-/**
+/*
  * This function comes to reflect the actual link state read DIRECTLY from the
  * HW
  */
@@ -3227,10 +3224,10 @@
 	u8 ext_phy_link_up = 0, serdes_phy_type;
 	struct link_vars temp_vars;
 
-	CL45_RD_OVER_CL22(bp, &params->phy[INT_PHY],
-			      MDIO_REG_BANK_GP_STATUS,
-			      MDIO_GP_STATUS_TOP_AN_STATUS1,
-			      &gp_status);
+	CL22_RD_OVER_CL45(bp, &params->phy[INT_PHY],
+			  MDIO_REG_BANK_GP_STATUS,
+			  MDIO_GP_STATUS_TOP_AN_STATUS1,
+			  &gp_status);
 	/* link is up only if both local phy and external phy are up */
 	if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
 		return -ESRCH;
@@ -3274,15 +3271,15 @@
 	u8 rc = 0;
 	u8 phy_index, non_ext_phy;
 	struct bnx2x *bp = params->bp;
-	/**
-	* In case of external phy existence, the line speed would be the
-	* line speed linked up by the external phy. In case it is direct
-	* only, then the line_speed during initialization will be
-	* equal to the req_line_speed
-	*/
+	/*
+	 * In case of external phy existence, the line speed would be the
+	 * line speed linked up by the external phy. In case it is direct
+	 * only, then the line_speed during initialization will be
+	 * equal to the req_line_speed
+	 */
 	vars->line_speed = params->phy[INT_PHY].req_line_speed;
 
-	/**
+	/*
 	 * Initialize the internal phy in case this is a direct board
 	 * (no external phys), or this board has external phy which requires
 	 * to first.
@@ -3310,17 +3307,16 @@
 	if (!non_ext_phy)
 		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
 		      phy_index++) {
-			/**
+			/*
 			 * No need to initialize second phy in case of first
 			 * phy only selection. In case of second phy, we do
 			 * need to initialize the first phy, since they are
 			 * connected.
-			 **/
+			 */
 			if (phy_index == EXT_PHY2 &&
 			    (bnx2x_phy_selection(params) ==
 			     PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
-				DP(NETIF_MSG_LINK, "Not initializing"
-						   "second phy\n");
+				DP(NETIF_MSG_LINK, "Ignoring second phy\n");
 				continue;
 			}
 			params->phy[phy_index].config_init(
@@ -3342,9 +3338,8 @@
 				 struct link_params *params)
 {
 	/* reset the SerDes/XGXS */
-	REG_WR(params->bp, GRCBASE_MISC +
-		     MISC_REGISTERS_RESET_REG_3_CLEAR,
-		     (0x1ff << (params->port*16)));
+	REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
+	       (0x1ff << (params->port*16)));
 }
 
 static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
@@ -3358,11 +3353,11 @@
 	else
 		gpio_port = params->port;
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-			    MISC_REGISTERS_GPIO_OUTPUT_LOW,
-			    gpio_port);
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
+		       gpio_port);
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-			    MISC_REGISTERS_GPIO_OUTPUT_LOW,
-			    gpio_port);
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
+		       gpio_port);
 	DP(NETIF_MSG_LINK, "reset external PHY\n");
 }
 
@@ -3393,9 +3388,8 @@
 
 	/* reset BigMac */
 	bnx2x_bmac_rx_disable(bp, params->port);
-	REG_WR(bp, GRCBASE_MISC +
-		   MISC_REGISTERS_RESET_REG_2_CLEAR,
-		   (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
 	return 0;
 }
 
@@ -3446,7 +3440,7 @@
 	msleep(20);
 	return rc;
 }
-/**
+/*
  * The bnx2x_link_update function should be called upon link
  * interrupt.
  * Link is considered up as follows:
@@ -3485,12 +3479,11 @@
 		 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
 
 	is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
-				    port*0x18) > 0);
+				port*0x18) > 0);
 	DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
 		 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
 		 is_mi_int,
-		 REG_RD(bp,
-			    NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
+		 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
 
 	DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
 	  REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
@@ -3499,14 +3492,14 @@
 	/* disable emac */
 	REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
 
-	/**
-	* Step 1:
-	* Check external link change only for external phys, and apply
-	* priority selection between them in case the link on both phys
-	* is up. Note that the instead of the common vars, a temporary
-	* vars argument is used since each phy may have different link/
-	* speed/duplex result
-	*/
+	/*
+	 * Step 1:
+	 * Check external link change only for external phys, and apply
+	 * priority selection between them in case the link on both phys
+	 * is up. Note that instead of the common vars, a temporary
+	 * vars argument is used since each phy may have different link/
+	 * speed/duplex result
+	 */
 	for (phy_index = EXT_PHY1; phy_index < params->num_phys;
 	      phy_index++) {
 		struct bnx2x_phy *phy = &params->phy[phy_index];
@@ -3531,22 +3524,22 @@
 			switch (bnx2x_phy_selection(params)) {
 			case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
 			case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
-			/**
+			/*
 			 * In this option, the first PHY makes sure to pass the
 			 * traffic through itself only.
 			 * Its not clear how to reset the link on the second phy
-			 **/
+			 */
 				active_external_phy = EXT_PHY1;
 				break;
 			case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
-			/**
+			/*
 			 * In this option, the first PHY makes sure to pass the
 			 * traffic through the second PHY.
-			 **/
+			 */
 				active_external_phy = EXT_PHY2;
 				break;
 			default:
-			/**
+			/*
 			 * Link indication on both PHYs with the following cases
 			 * is invalid:
 			 * - FIRST_PHY means that second phy wasn't initialized,
@@ -3554,7 +3547,7 @@
 			 * - SECOND_PHY means that first phy should not be able
 			 * to link up by itself (using configuration)
 			 * - DEFAULT should be overriden during initialiazation
-			 **/
+			 */
 				DP(NETIF_MSG_LINK, "Invalid link indication"
 					   "mpc=0x%x. DISABLING LINK !!!\n",
 					   params->multi_phy_config);
@@ -3564,18 +3557,18 @@
 		}
 	}
 	prev_line_speed = vars->line_speed;
-	/**
-	* Step 2:
-	* Read the status of the internal phy. In case of
-	* DIRECT_SINGLE_MEDIA board, this link is the external link,
-	* otherwise this is the link between the 577xx and the first
-	* external phy
-	*/
+	/*
+	 * Step 2:
+	 * Read the status of the internal phy. In case of
+	 * DIRECT_SINGLE_MEDIA board, this link is the external link,
+	 * otherwise this is the link between the 577xx and the first
+	 * external phy
+	 */
 	if (params->phy[INT_PHY].read_status)
 		params->phy[INT_PHY].read_status(
 			&params->phy[INT_PHY],
 			params, vars);
-	/**
+	/*
 	 * The INT_PHY flow control reside in the vars. This include the
 	 * case where the speed or flow control are not set to AUTO.
 	 * Otherwise, the active external phy flow control result is set
@@ -3585,13 +3578,13 @@
 	 */
 	if (active_external_phy > INT_PHY) {
 		vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
-		/**
+		/*
 		 * Link speed is taken from the XGXS. AN and FC result from
 		 * the external phy.
 		 */
 		vars->link_status |= phy_vars[active_external_phy].link_status;
 
-		/**
+		/*
 		 * if active_external_phy is first PHY and link is up - disable
 		 * disable TX on second external PHY
 		 */
@@ -3627,7 +3620,7 @@
 	DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
 		   " ext_phy_line_speed = %d\n", vars->flow_ctrl,
 		   vars->link_status, ext_phy_line_speed);
-	/**
+	/*
 	 * Upon link speed change set the NIG into drain mode. Comes to
 	 * deals with possible FIFO glitch due to clk change when speed
 	 * is decreased without link down indicator
@@ -3642,8 +3635,8 @@
 				   ext_phy_line_speed);
 			vars->phy_link_up = 0;
 		} else if (prev_line_speed != vars->line_speed) {
-			REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
-				     + params->port*4, 0);
+			REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
+			       0);
 			msleep(1);
 		}
 	}
@@ -3658,14 +3651,14 @@
 
 	bnx2x_link_int_ack(params, vars, link_10g);
 
-	/**
-	* In case external phy link is up, and internal link is down
-	* (not initialized yet probably after link initialization, it
-	* needs to be initialized.
-	* Note that after link down-up as result of cable plug, the xgxs
-	* link would probably become up again without the need
-	* initialize it
-	*/
+	/*
+	 * In case the external phy link is up and the internal link is
+	 * down (not initialized yet, probably right after link
+	 * initialization), it needs to be initialized.
+	 * Note that after a link down-up as a result of a cable plug,
+	 * the xgxs link would probably come up again without the need
+	 * to initialize it
+	 */
 	if (!(SINGLE_MEDIA_DIRECT(params))) {
 		DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
 			   " init_preceding = %d\n", ext_phy_link_up,
@@ -3685,9 +3678,9 @@
 						vars);
 		}
 	}
-	/**
-	 *  Link is up only if both local phy and external phy (in case of
-	 *  non-direct board) are up
+	/*
+	 * Link is up only if both local phy and external phy (in case of
+	 * non-direct board) are up
 	 */
 	vars->link_up = (vars->phy_link_up &&
 			 (ext_phy_link_up ||
@@ -3708,10 +3701,10 @@
 void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
 {
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-			    MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 	msleep(1);
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-			    MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
 }
 
 static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
@@ -3731,9 +3724,9 @@
 	u16 fw_ver1, fw_ver2;
 
 	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
-		      MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+			MDIO_PMA_REG_ROM_VER1, &fw_ver1);
 	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
-		      MDIO_PMA_REG_ROM_VER2, &fw_ver2);
+			MDIO_PMA_REG_ROM_VER2, &fw_ver2);
 	bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
 				  phy->ver_addr);
 }
@@ -3754,7 +3747,7 @@
 	if ((vars->ieee_fc &
 	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
 	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
-		val |=  MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+		val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
 	}
 	if ((vars->ieee_fc &
 	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
@@ -3785,11 +3778,11 @@
 	else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
 		ret = 1;
 		bnx2x_cl45_read(bp, phy,
-			      MDIO_AN_DEVAD,
-			      MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_ADV_PAUSE, &ld_pause);
 		bnx2x_cl45_read(bp, phy,
-			      MDIO_AN_DEVAD,
-			      MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
 		pause_result = (ld_pause &
 				MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
 		pause_result |= (lp_pause &
@@ -3854,90 +3847,82 @@
 			   pause_result);
 	}
 }
-
-static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
+static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
 					      struct bnx2x_phy *phy,
 					      u8 port)
 {
+	u32 count = 0;
+	u16 fw_ver1, fw_msgout;
+	u8 rc = 0;
+
 	/* Boot port from external ROM  */
 	/* EDC grst */
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_GEN_CTRL,
-		       0x0001);
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 0x0001);
 
 	/* ucode reboot and rst */
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_GEN_CTRL,
-		       0x008c);
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 0x008c);
 
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
 
 	/* Reset internal microprocessor */
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_GEN_CTRL,
-		       MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
 
 	/* Release srst bit */
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_GEN_CTRL,
-		       MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
 
-	/* wait for 120ms for code download via SPI port */
-	msleep(120);
+	/* Delay 100ms per the PHY specifications */
+	msleep(100);
+
+	/* The 8073 sometimes takes longer to download */
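+	/*
+	 * Poll until the download completes: the ROM version register must
+	 * read back a real version (neither 0 nor the 0x4321 power-on
+	 * pattern) and, on the 8073, the microcode message-out register
+	 * must report 0x03.
+	 */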
+	do {
+		count++;
+		if (count > 300) {
+			DP(NETIF_MSG_LINK,
+				 "bnx2x_8073_8727_external_rom_boot port %x:"
+				 "Download failed. fw version = 0x%x\n",
+				 port, fw_ver1);
+			rc = -EINVAL;
+			break;
+		}
+
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
+
+		msleep(1);
+	} while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
+			((fw_msgout & 0xff) != 0x03 && (phy->type ==
+			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
 
 	/* Clear ser_boot_ctl bit */
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
 	bnx2x_save_bcm_spirom_ver(bp, phy, port);
-}
 
-static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp,
-					       struct bnx2x_phy *phy)
-{
-	u16 val;
-	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV, &val);
+	DP(NETIF_MSG_LINK,
+		 "bnx2x_8073_8727_external_rom_boot port %x:"
+		 "Download complete. fw version = 0x%x\n",
+		 port, fw_ver1);
 
-	if (val == 0) {
-		/* Mustn't set low power mode in 8073 A0 */
-		return;
-	}
-
-	/* Disable PLL sequencer (use read-modify-write to clear bit 13) */
-	bnx2x_cl45_read(bp, phy,
-			MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
-	val &= ~(1<<13);
-	bnx2x_cl45_write(bp, phy,
-		       MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
-
-	/* PLL controls */
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805E, 0x1077);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805D, 0x0000);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805C, 0x030B);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805B, 0x1240);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x805A, 0x2490);
-
-	/* Tx Controls */
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A7, 0x0C74);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A6, 0x9041);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80A5, 0x4640);
-
-	/* Rx Controls */
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FE, 0x01C4);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FD, 0x9249);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, 0x80FC, 0x2015);
-
-	/* Enable PLL sequencer  (use read-modify-write to set bit 13) */
-	bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, &val);
-	val |= (1<<13);
-	bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, MDIO_XS_PLL_SEQUENCER, val);
+	return rc;
 }
 
 /******************************************************************/
@@ -3950,8 +3935,8 @@
 
 	/* Read 8073 HW revision*/
 	bnx2x_cl45_read(bp, phy,
-		      MDIO_PMA_DEVAD,
-		      MDIO_PMA_REG_8073_CHIP_REV, &val);
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8073_CHIP_REV, &val);
 
 	if (val != 1) {
 		/* No need to workaround in 8073 A1 */
@@ -3959,8 +3944,8 @@
 	}
 
 	bnx2x_cl45_read(bp, phy,
-		      MDIO_PMA_DEVAD,
-		      MDIO_PMA_REG_ROM_VER2, &val);
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_ROM_VER2, &val);
 
 	/* SNR should be applied only for version 0x102 */
 	if (val != 0x102)
@@ -3974,8 +3959,8 @@
 	u16 val, cnt, cnt1 ;
 
 	bnx2x_cl45_read(bp, phy,
-		      MDIO_PMA_DEVAD,
-		      MDIO_PMA_REG_8073_CHIP_REV, &val);
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8073_CHIP_REV, &val);
 
 	if (val > 0) {
 		/* No need to workaround in 8073 A1 */
@@ -3983,26 +3968,32 @@
 	}
 	/* XAUI workaround in 8073 A0: */
 
-	/* After loading the boot ROM and restarting Autoneg,
-	poll Dev1, Reg $C820: */
+	/*
+	 * After loading the boot ROM and restarting Autoneg, poll
+	 * Dev1, Reg $C820:
+	 */
 
 	for (cnt = 0; cnt < 1000; cnt++) {
 		bnx2x_cl45_read(bp, phy,
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
-			      &val);
-		  /* If bit [14] = 0 or bit [13] = 0, continue on with
-		   system initialization (XAUI work-around not required,
-		    as these bits indicate 2.5G or 1G link up). */
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+				&val);
+		  /*
+		   * If bit [14] = 0 or bit [13] = 0, continue on with
+		   * system initialization (XAUI work-around not required, as
+		   * these bits indicate 2.5G or 1G link up).
+		   */
 		if (!(val & (1<<14)) || !(val & (1<<13))) {
 			DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
 			return 0;
 		} else if (!(val & (1<<15))) {
-			DP(NETIF_MSG_LINK, "clc bit 15 went off\n");
-			 /* If bit 15 is 0, then poll Dev1, Reg $C841 until
-			  it's MSB (bit 15) goes to 1 (indicating that the
-			  XAUI workaround has completed),
-			  then continue on with system initialization.*/
+			DP(NETIF_MSG_LINK, "bit 15 went off\n");
+			/*
+			 * If bit 15 is 0, then poll Dev1, Reg $C841 until its
+			 * MSB (bit 15) goes to 1 (indicating that the XAUI
+			 * workaround has completed), then continue on with
+			 * system initialization.
+			 */
 			for (cnt1 = 0; cnt1 < 1000; cnt1++) {
 				bnx2x_cl45_read(bp, phy,
 					MDIO_PMA_DEVAD,
@@ -4085,10 +4076,10 @@
 		gpio_port = params->port;
 	/* Restore normal power mode*/
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-			    MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
 
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-			    MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
 
 	/* enable LASI */
 	bnx2x_cl45_write(bp, phy,
@@ -4098,8 +4089,6 @@
 
 	bnx2x_8073_set_pause_cl37(params, phy, vars);
 
-	bnx2x_8073_set_xaui_low_power_mode(bp, phy);
-
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
 
@@ -4108,6 +4097,21 @@
 
 	DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
 
+	/* Swap polarity if required - Must be done only in non-1G mode */
+	if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+		/* Configure the 8073 to swap _P and _N of the KR lines */
+		DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
+		/* 10G Rx/Tx and 1G Tx signal polarity swap */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
+				 (val | (3<<9)));
+	}
+
+
 	/* Enable CL37 BAM */
 	if (REG_RD(bp, params->shmem_base +
 			 offsetof(struct shmem_region, dev_info.
@@ -4135,8 +4139,10 @@
 			val = (1<<7);
 		} else if (phy->req_line_speed ==  SPEED_2500) {
 			val = (1<<5);
-			/* Note that 2.5G works only
-			when used with 1G advertisment */
+			/*
+			 * Note that 2.5G works only when used with 1G
+			 * advertisement
+			 */
 		} else
 			val = (1<<5);
 	} else {
@@ -4145,8 +4151,7 @@
 			PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
 			val |= (1<<7);
 
-		/* Note that 2.5G works only when
-		used with 1G advertisment */
+		/* Note that 2.5G works only when used with 1G advertisement */
 		if (phy->speed_cap_mask &
 			(PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
 			 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
@@ -4186,9 +4191,11 @@
 	/* Add support for CL37 (passive mode) III */
 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
 
-	/* The SNR will improve about 2db by changing
-	BW and FEE main tap. Rest commands are executed
-	after link is up*/
+	/*
+	 * The SNR will improve about 2dB by changing BW and FFE main
+	 * tap. The remaining commands are executed after the link is up.
+	 * Change FFE main cursor to 5 in EDC register
+	 */
 	if (bnx2x_8073_is_snr_needed(bp, phy))
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
@@ -4272,12 +4279,11 @@
 
 	link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
 	if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
-		/* The SNR will improve about 2dbby
-		changing the BW and FEE main tap.*/
-		/* The 1st write to change FFE main
-		tap is set before restart AN */
-		/* Change PLL Bandwidth in EDC
-		register */
+		/*
+		 * The SNR will improve about 2dB by changing the BW and FFE main
+		 * tap. The 1st write to change FFE main tap is set before
+		 * restart AN. Change PLL Bandwidth in EDC register
+		 */
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
 				 0x26BC);
@@ -4314,8 +4320,32 @@
 	}
 
 	if (link_up) {
+		/* Swap polarity if required */
+		if (params->lane_config &
+		    PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+			/* Configure the 8073 to swap P and N of the KR lines */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_XS_DEVAD,
+					MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
+			/*
+			 * Set bit 3 to invert Rx in 1G mode and clear this bit
+			 * when it's in 10G mode.
+			 */
+			if (vars->line_speed == SPEED_1000) {
+				DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
+					      " the 8073\n");
+				val1 |= (1<<3);
+			} else
+				val1 &= ~(1<<3);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_XS_DEVAD,
+					 MDIO_XS_REG_8073_RX_CTRL_PCIE,
+					 val1);
+		}
 		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
 		bnx2x_8073_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
 	}
 	return link_up;
 }
@@ -4332,8 +4362,8 @@
 	DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
 	   gpio_port);
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-			    MISC_REGISTERS_GPIO_OUTPUT_LOW,
-			    gpio_port);
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
+		       gpio_port);
 }
 
 /******************************************************************/
@@ -4347,11 +4377,11 @@
 	DP(NETIF_MSG_LINK, "init 8705\n");
 	/* Restore normal power mode*/
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-			    MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 	/* HW reset */
 	bnx2x_ext_phy_hw_reset(bp, params->port);
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
@@ -4402,35 +4432,79 @@
 /******************************************************************/
 /*			SFP+ module Section			  */
 /******************************************************************/
-static void bnx2x_sfp_set_transmitter(struct bnx2x *bp,
+static u8 bnx2x_get_gpio_port(struct link_params *params)
+{
+	u8 gpio_port;
+	u32 swap_val, swap_override;
+	struct bnx2x *bp = params->bp;
+	if (CHIP_IS_E2(bp))
+		gpio_port = BP_PATH(bp);
+	else
+		gpio_port = params->port;
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
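+	/* Swap the port if both the swap strap and its override are set */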
+	return gpio_port ^ (swap_val && swap_override);
+}
+static void bnx2x_sfp_set_transmitter(struct link_params *params,
 				      struct bnx2x_phy *phy,
-				      u8 port,
 				      u8 tx_en)
 {
 	u16 val;
+	u8 port = params->port;
+	struct bnx2x *bp = params->bp;
+	u32 tx_en_mode;
 
-	DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x\n",
-		 tx_en, port);
 	/* Disable/Enable transmitter ( TX laser of the SFP+ module.)*/
-	bnx2x_cl45_read(bp, phy,
-		      MDIO_PMA_DEVAD,
-		      MDIO_PMA_REG_PHY_IDENTIFIER,
-		      &val);
+	tx_en_mode = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+				     dev_info.port_hw_config[port].sfp_ctrl)) &
+		PORT_HW_CFG_TX_LASER_MASK;
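+	/* tx_en_mode selects MDIO or GPIO control of the TX laser */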
+	DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
+			   "mode = %x\n", tx_en, port, tx_en_mode);
+	switch (tx_en_mode) {
+	case PORT_HW_CFG_TX_LASER_MDIO:
 
-	if (tx_en)
-		val &= ~(1<<15);
-	else
-		val |= (1<<15);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_PHY_IDENTIFIER,
+				&val);
 
-	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_PHY_IDENTIFIER,
-		       val);
+		if (tx_en)
+			val &= ~(1<<15);
+		else
+			val |= (1<<15);
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_PHY_IDENTIFIER,
+				 val);
+	break;
+	case PORT_HW_CFG_TX_LASER_GPIO0:
+	case PORT_HW_CFG_TX_LASER_GPIO1:
+	case PORT_HW_CFG_TX_LASER_GPIO2:
+	case PORT_HW_CFG_TX_LASER_GPIO3:
+	{
+		u16 gpio_pin;
+		u8 gpio_port, gpio_mode;
+		if (tx_en)
+			gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
+		else
+			gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
+
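+		/* Derive the GPIO pin index from the configured laser mode */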
+		gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
+		gpio_port = bnx2x_get_gpio_port(params);
+		bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+		break;
+	}
+	default:
+		DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
+		break;
+	}
 }
 
 static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 					    struct link_params *params,
-					  u16 addr, u8 byte_cnt, u8 *o_buf)
+					    u16 addr, u8 byte_cnt, u8 *o_buf)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val = 0;
@@ -4443,23 +4517,23 @@
 	/* Set the read command byte count */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
-		       (byte_cnt | 0xa000));
+			 (byte_cnt | 0xa000));
 
 	/* Set the read command address */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
-		       addr);
+			 addr);
 
 	/* Activate read command */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
-		       0x2c0f);
+			 0x2c0f);
 
 	/* Wait up to 500us for command complete status */
 	for (i = 0; i < 100; i++) {
 		bnx2x_cl45_read(bp, phy,
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
 		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
 			break;
@@ -4477,15 +4551,15 @@
 	/* Read the buffer */
 	for (i = 0; i < byte_cnt; i++) {
 		bnx2x_cl45_read(bp, phy,
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
 		o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
 	}
 
 	for (i = 0; i < 100; i++) {
 		bnx2x_cl45_read(bp, phy,
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
 		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
 			return 0;
@@ -4496,7 +4570,7 @@
 
 static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 					    struct link_params *params,
-					  u16 addr, u8 byte_cnt, u8 *o_buf)
+					    u16 addr, u8 byte_cnt, u8 *o_buf)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val, i;
@@ -4509,41 +4583,43 @@
 
 	/* Need to read from 1.8000 to clear it */
 	bnx2x_cl45_read(bp, phy,
-		      MDIO_PMA_DEVAD,
-		      MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
-		      &val);
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+			&val);
 
 	/* Set the read command byte count */
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
-		       ((byte_cnt < 2) ? 2 : byte_cnt));
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+			 ((byte_cnt < 2) ? 2 : byte_cnt));
 
 	/* Set the read command address */
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
-		       addr);
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+			 addr);
 	/* Set the destination address */
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       0x8004,
-		       MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
+			 MDIO_PMA_DEVAD,
+			 0x8004,
+			 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
 
 	/* Activate read command */
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
-		       0x8002);
-	/* Wait appropriate time for two-wire command to finish before
-	polling the status register */
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+			 0x8002);
+	/*
+	 * Wait appropriate time for two-wire command to finish before
+	 * polling the status register
+	 */
 	msleep(1);
 
 	/* Wait up to 500us for command complete status */
 	for (i = 0; i < 100; i++) {
 		bnx2x_cl45_read(bp, phy,
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
 		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
 			break;
@@ -4555,21 +4631,21 @@
 		DP(NETIF_MSG_LINK,
 			 "Got bad status 0x%x when reading from SFP+ EEPROM\n",
 			 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
-		return -EINVAL;
+		return -EFAULT;
 	}
 
 	/* Read the buffer */
 	for (i = 0; i < byte_cnt; i++) {
 		bnx2x_cl45_read(bp, phy,
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
 		o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
 	}
 
 	for (i = 0; i < 100; i++) {
 		bnx2x_cl45_read(bp, phy,
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
 		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
 			return 0;
@@ -4579,22 +4655,22 @@
 	return -EINVAL;
 }
 
-static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
-				       struct link_params *params, u16 addr,
-				       u8 byte_cnt, u8 *o_buf)
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+				struct link_params *params, u16 addr,
+				u8 byte_cnt, u8 *o_buf)
 {
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
 		return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
-						       byte_cnt, o_buf);
+							 byte_cnt, o_buf);
 	else if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
 		return bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
-						       byte_cnt, o_buf);
+							 byte_cnt, o_buf);
 	return -EINVAL;
 }
 
 static u8 bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 			     struct link_params *params,
-				  u16 *edc_mode)
+			     u16 *edc_mode)
 {
 	struct bnx2x *bp = params->bp;
 	u8 val, check_limiting_mode = 0;
@@ -4615,8 +4691,10 @@
 	{
 		u8 copper_module_type;
 
-		/* Check if its active cable( includes SFP+ module)
-		of passive cable*/
+		/*
+		 * Check if it's an active cable (includes SFP+ module)
+		 * or a passive cable
+		 */
 		if (bnx2x_read_sfp_module_eeprom(phy,
 					       params,
 					       SFP_EEPROM_FC_TX_TECH_ADDR,
@@ -4675,8 +4753,10 @@
 	DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
 	return 0;
 }
-/* This function read the relevant field from the module ( SFP+ ),
-	and verify it is compliant with this board */
+/*
+ * This function reads the relevant field from the module (SFP+) and verifies
+ * that it is compliant with this board
+ */
 static u8 bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
 				  struct link_params *params)
 {
@@ -4725,24 +4805,24 @@
 	/* format the warning message */
 	if (bnx2x_read_sfp_module_eeprom(phy,
 					 params,
-				       SFP_EEPROM_VENDOR_NAME_ADDR,
-				       SFP_EEPROM_VENDOR_NAME_SIZE,
-				       (u8 *)vendor_name))
+					 SFP_EEPROM_VENDOR_NAME_ADDR,
+					 SFP_EEPROM_VENDOR_NAME_SIZE,
+					 (u8 *)vendor_name))
 		vendor_name[0] = '\0';
 	else
 		vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
 	if (bnx2x_read_sfp_module_eeprom(phy,
 					 params,
-				       SFP_EEPROM_PART_NO_ADDR,
-				       SFP_EEPROM_PART_NO_SIZE,
-				       (u8 *)vendor_pn))
+					 SFP_EEPROM_PART_NO_ADDR,
+					 SFP_EEPROM_PART_NO_SIZE,
+					 (u8 *)vendor_pn))
 		vendor_pn[0] = '\0';
 	else
 		vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
 
-	netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected,"
-			     " Port %d from %s part number %s\n",
-		    params->port, vendor_name, vendor_pn);
+	netdev_err(bp->dev,  "Warning: Unqualified SFP+ module detected,"
+			      " Port %d from %s part number %s\n",
+			 params->port, vendor_name, vendor_pn);
 	phy->flags |= FLAGS_SFP_NOT_APPROVED;
 	return -EINVAL;
 }
@@ -4754,8 +4834,11 @@
 	u8 val;
 	struct bnx2x *bp = params->bp;
 	u16 timeout;
-	/* Initialization time after hot-plug may take up to 300ms for some
-	phys type ( e.g. JDSU ) */
+	/*
+	 * Initialization time after hot-plug may take up to 300ms for
+	 * some phy types (e.g. JDSU)
+	 */
+
 	for (timeout = 0; timeout < 60; timeout++) {
 		if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
 		    == 0) {
@@ -4774,16 +4857,14 @@
 	/* Make sure GPIOs are not using for LED mode */
 	u16 val;
 	/*
-	 * In the GPIO register, bit 4 is use to detemine if the GPIOs are
+	 * In the GPIO register, bit 4 is used to determine if the GPIOs are
 	 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
 	 * output
 	 * Bits 0-1 determine the gpios value for OUTPUT in case bit 4 val is 0
 	 * Bits 8-9 determine the gpios value for INPUT in case bit 4 val is 1
 	 * where the 1st bit is the over-current(only input), and 2nd bit is
 	 * for power( only output )
-	*/
-
-	/*
+	 *
 	 * In case of NOC feature is disabled and power is up, set GPIO control
 	 *  as input to enable listening of over-current indication
 	 */
@@ -4812,15 +4893,14 @@
 	u16 cur_limiting_mode;
 
 	bnx2x_cl45_read(bp, phy,
-		      MDIO_PMA_DEVAD,
-		      MDIO_PMA_REG_ROM_VER2,
-		      &cur_limiting_mode);
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_ROM_VER2,
+			&cur_limiting_mode);
 	DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
 		 cur_limiting_mode);
 
 	if (edc_mode == EDC_MODE_LIMITING) {
-		DP(NETIF_MSG_LINK,
-			 "Setting LIMITING MODE\n");
+		DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD,
 				 MDIO_PMA_REG_ROM_VER2,
@@ -4829,62 +4909,63 @@
 
 		DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
 
-		/* Changing to LRM mode takes quite few seconds.
-		So do it only if current mode is limiting
-		( default is LRM )*/
+		/*
+		 * Changing to LRM mode takes quite a few seconds. So do it only
+		 * if current mode is limiting (default is LRM)
+		 */
 		if (cur_limiting_mode != EDC_MODE_LIMITING)
 			return 0;
 
 		bnx2x_cl45_write(bp, phy,
-			       MDIO_PMA_DEVAD,
-			       MDIO_PMA_REG_LRM_MODE,
-			       0);
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_LRM_MODE,
+				 0);
 		bnx2x_cl45_write(bp, phy,
-			       MDIO_PMA_DEVAD,
-			       MDIO_PMA_REG_ROM_VER2,
-			       0x128);
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_ROM_VER2,
+				 0x128);
 		bnx2x_cl45_write(bp, phy,
-			       MDIO_PMA_DEVAD,
-			       MDIO_PMA_REG_MISC_CTRL0,
-			       0x4008);
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_MISC_CTRL0,
+				 0x4008);
 		bnx2x_cl45_write(bp, phy,
-			       MDIO_PMA_DEVAD,
-			       MDIO_PMA_REG_LRM_MODE,
-			       0xaaaa);
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_LRM_MODE,
+				 0xaaaa);
 	}
 	return 0;
 }
 
 static u8 bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
 				       struct bnx2x_phy *phy,
-					u16 edc_mode)
+				       u16 edc_mode)
 {
 	u16 phy_identifier;
 	u16 rom_ver2_val;
 	bnx2x_cl45_read(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_PHY_IDENTIFIER,
-		       &phy_identifier);
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_PHY_IDENTIFIER,
+			&phy_identifier);
 
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_PHY_IDENTIFIER,
-		       (phy_identifier & ~(1<<9)));
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_PHY_IDENTIFIER,
+			 (phy_identifier & ~(1<<9)));
 
 	bnx2x_cl45_read(bp, phy,
-		      MDIO_PMA_DEVAD,
-		      MDIO_PMA_REG_ROM_VER2,
-		      &rom_ver2_val);
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_ROM_VER2,
+			&rom_ver2_val);
 	/* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_ROM_VER2,
-		       (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_ROM_VER2,
+			 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
 
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_PHY_IDENTIFIER,
-		       (phy_identifier | (1<<9)));
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_PHY_IDENTIFIER,
+			 (phy_identifier | (1<<9)));
 
 	return 0;
 }
@@ -4897,11 +4978,11 @@
 
 	switch (action) {
 	case DISABLE_TX:
-		bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+		bnx2x_sfp_set_transmitter(params, phy, 0);
 		break;
 	case ENABLE_TX:
 		if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
-			bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+			bnx2x_sfp_set_transmitter(params, phy, 1);
 		break;
 	default:
 		DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -4910,6 +4991,38 @@
 	}
 }
 
+static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+					   u8 gpio_mode)
+{
+	struct bnx2x *bp = params->bp;
+
+	u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].sfp_ctrl)) &
+		PORT_HW_CFG_FAULT_MODULE_LED_MASK;
+	switch (fault_led_gpio) {
+	case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
+		return;
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
+	{
+		u8 gpio_port = bnx2x_get_gpio_port(params);
+		u16 gpio_pin = fault_led_gpio -
+			PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
+		DP(NETIF_MSG_LINK, "Set fault module-detected led "
+				   "pin %x port %x mode %x\n",
+			       gpio_pin, gpio_port, gpio_mode);
+		bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+	}
+	break;
+	default:
+		DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
+			       fault_led_gpio);
+	}
+}
+
 static u8 bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
 				     struct link_params *params)
 {
@@ -4927,15 +5040,14 @@
 	if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
 		DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
 		return -EINVAL;
-	} else if (bnx2x_verify_sfp_module(phy, params) !=
-		   0) {
+	} else if (bnx2x_verify_sfp_module(phy, params) != 0) {
 		/* check SFP+ module compatibility */
 		DP(NETIF_MSG_LINK, "Module verification failed!!\n");
 		rc = -EINVAL;
 		/* Turn on fault module-detected led */
-		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-				  MISC_REGISTERS_GPIO_HIGH,
-				  params->port);
+		bnx2x_set_sfp_module_fault_led(params,
+					       MISC_REGISTERS_GPIO_HIGH);
+
 		if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) &&
 		    ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
 		     PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN)) {
@@ -4946,18 +5058,17 @@
 		}
 	} else {
 		/* Turn off fault module-detected led */
-		DP(NETIF_MSG_LINK, "Turn off fault module-detected led\n");
-		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-					  MISC_REGISTERS_GPIO_LOW,
-					  params->port);
+		bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
 	}
 
 	/* power up the SFP module */
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727)
 		bnx2x_8727_power_module(bp, phy, 1);
 
-	/* Check and set limiting mode / LRM mode on 8726.
-	On 8727 it is done automatically */
+	/*
+	 * Check and set limiting mode / LRM mode on 8726. On 8727 it
+	 * is done automatically
+	 */
 	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
 		bnx2x_8726_set_limiting_mode(bp, phy, edc_mode);
 	else
@@ -4969,9 +5080,9 @@
 	if (rc == 0 ||
 	    (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
 	    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-		bnx2x_sfp_set_transmitter(bp, phy, params->port, 1);
+		bnx2x_sfp_set_transmitter(params, phy, 1);
 	else
-		bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+		bnx2x_sfp_set_transmitter(params, phy, 0);
 
 	return rc;
 }
@@ -4984,11 +5095,9 @@
 	u8 port = params->port;
 
 	/* Set valid module led off */
-	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-			  MISC_REGISTERS_GPIO_HIGH,
-			  params->port);
+	bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
 
-	/* Get current gpio val refelecting module plugged in / out*/
+	/* Get current gpio val reflecting module plugged in / out*/
 	gpio_val = bnx2x_get_gpio(bp, MISC_REGISTERS_GPIO_3, port);
 
 	/* Call the handling function in case module is detected */
@@ -5004,18 +5113,20 @@
 			DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
 	} else {
 		u32 val = REG_RD(bp, params->shmem_base +
-				     offsetof(struct shmem_region, dev_info.
-					      port_feature_config[params->port].
-					      config));
+				 offsetof(struct shmem_region, dev_info.
+					  port_feature_config[params->port].
+					  config));
 
 		bnx2x_set_gpio_int(bp, MISC_REGISTERS_GPIO_3,
 				   MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
 				   port);
-		/* Module was plugged out. */
-		/* Disable transmit for this module */
+		/*
+		 * Module was plugged out.
+		 * Disable transmit for this module
+		 */
 		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
 		    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-			bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+			bnx2x_sfp_set_transmitter(params, phy, 0);
 	}
 }
 
@@ -5051,9 +5162,9 @@
 
 	DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
 			" link_status 0x%x\n", rx_sd, pcs_status, val2);
-	/* link is up if both bit 0 of pmd_rx_sd and
-	 * bit 0 of pcs_status are set, or if the autoneg bit
-	 * 1 is set
+	/*
+	 * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+	 * are set, or if the autoneg bit 1 is set
 	 */
 	link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
 	if (link_up) {
@@ -5062,6 +5173,7 @@
 		else
 			vars->line_speed = SPEED_10000;
 		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
 	}
 	return link_up;
 }
@@ -5073,14 +5185,15 @@
 				 struct link_params *params,
 				 struct link_vars *vars)
 {
-	u16 cnt, val;
+	u32 tx_en_mode;
+	u16 cnt, val, tmp1;
 	struct bnx2x *bp = params->bp;
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-			    MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 	/* HW reset */
 	bnx2x_ext_phy_hw_reset(bp, params->port);
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 
 	/* Wait until fw is loaded */
 	for (cnt = 0; cnt < 100; cnt++) {
@@ -5147,6 +5260,26 @@
 				 0x0004);
 	}
 	bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+
+	/*
+	 * If the TX laser is controlled by GPIO_0, do not let the PHY go into
+	 * low power mode while the TX laser is disabled
+	 */
+
+	tx_en_mode = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+				dev_info.port_hw_config[params->port].sfp_ctrl))
+			& PORT_HW_CFG_TX_LASER_MASK;
+
+	if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+		DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+		bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
+		tmp1 |= 0x1;
+		bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
+	}
+
 	return 0;
 }
 
@@ -5181,26 +5314,26 @@
 
 	/* Set soft reset */
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_GEN_CTRL,
-		       MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
 
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
 
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_GEN_CTRL,
-		       MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
 
 	/* wait for 150ms for microcode load */
 	msleep(150);
 
 	/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
 	bnx2x_cl45_write(bp, phy,
-		       MDIO_PMA_DEVAD,
-		       MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
 
 	msleep(200);
 	bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
@@ -5235,23 +5368,18 @@
 	u32 val;
 	u32 swap_val, swap_override, aeu_gpio_mask, offset;
 	DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
-	/* Restore normal power mode*/
-	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-			    MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
-
-	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-			    MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 
 	bnx2x_8726_external_rom_boot(phy, params);
 
-	/* Need to call module detected on initialization since
-	the module detection triggered by actual module
-	insertion might occur before driver is loaded, and when
-	driver is loaded, it reset all registers, including the
-	transmitter */
+	/*
+	 * Need to call module detection on initialization since the module
+	 * detection triggered by actual module insertion might occur before
+	 * the driver is loaded, and when the driver is loaded, it resets all
+	 * registers, including the transmitter
+	 */
 	bnx2x_sfp_module_detection(phy, params);
 
 	if (phy->req_line_speed == SPEED_1000) {
@@ -5284,8 +5412,10 @@
 				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
 		bnx2x_cl45_write(bp, phy,
 				MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
-		/* Enable RX-ALARM control to receive
-		interrupt for 1G speed change */
+		/*
+		 * Enable RX-ALARM control to receive interrupt for 1G speed
+		 * change
+		 */
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x4);
 		bnx2x_cl45_write(bp, phy,
@@ -5317,7 +5447,7 @@
 
 	/* Set GPIO3 to trigger SFP+ module insertion/removal */
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
-			    MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
+		       MISC_REGISTERS_GPIO_INPUT_HI_Z, params->port);
 
 	/* The GPIO should be swapped if the swap register is set and active */
 	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
@@ -5408,7 +5538,7 @@
 				struct link_params *params) {
 	u32 swap_val, swap_override;
 	u8 port;
-	/**
+	/*
 	 * The PHY reset is controlled by GPIO 1. Fake the port number
 	 * to cancel the swap done in set_gpio()
 	 */
@@ -5417,20 +5547,21 @@
 	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 	port = (swap_val && swap_override) ^ 1;
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
-			    MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 }
 
 static u8 bnx2x_8727_config_init(struct bnx2x_phy *phy,
 				 struct link_params *params,
 				 struct link_vars *vars)
 {
-	u16 tmp1, val, mod_abs;
+	u32 tx_en_mode;
+	u16 tmp1, val, mod_abs, tmp2;
 	u16 rx_alarm_ctrl_val;
 	u16 lasi_ctrl_val;
 	struct bnx2x *bp = params->bp;
 	/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
 
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 	rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
 	lasi_ctrl_val = 0x0004;
 
@@ -5443,14 +5574,17 @@
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, lasi_ctrl_val);
 
-	/* Initially configure  MOD_ABS to interrupt when
-	module is presence( bit 8) */
+	/*
+	 * Initially configure MOD_ABS to interrupt when a module is
+	 * present (bit 8)
+	 */
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
-	/* Set EDC off by setting OPTXLOS signal input to low
-	(bit 9).
-	When the EDC is off it locks onto a reference clock and
-	avoids becoming 'lost'.*/
+	/*
+	 * Set EDC off by setting OPTXLOS signal input to low (bit 9).
+	 * When the EDC is off it locks onto a reference clock and avoids
+	 * becoming 'lost'
+	 */
 	mod_abs &= ~(1<<8);
 	if (!(phy->flags & FLAGS_NOC))
 		mod_abs &= ~(1<<9);
@@ -5465,7 +5599,7 @@
 	if (phy->flags & FLAGS_NOC)
 		val |= (3<<5);
 
-	/**
+	/*
 	 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
 	 * status which reflect SFP+ module over-current
 	 */
@@ -5492,7 +5626,7 @@
 		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
 		DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
-		/**
+		/*
 		 * Power down the XAUI until link is up in case of dual-media
 		 * and 1G
 		 */
@@ -5518,7 +5652,7 @@
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
 	} else {
-		/**
+		/*
 		 * Since the 8727 has only single reset pin, need to set the 10G
 		 * registers although it is default
 		 */
@@ -5534,7 +5668,8 @@
 				 0x0008);
 	}
 
-	/* Set 2-wire transfer rate of SFP+ module EEPROM
+	/*
+	 * Set 2-wire transfer rate of SFP+ module EEPROM
 	 * to 100Khz since some DACs(direct attached cables) do
 	 * not work at 400Khz.
 	 */
@@ -5557,6 +5692,26 @@
 				 phy->tx_preemphasis[1]);
 	}
 
+	/*
+	 * If the TX laser is controlled by GPIO_0, do not let the PHY go into
+	 * low power mode while the TX laser is disabled
+	 */
+	tx_en_mode = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+				dev_info.port_hw_config[params->port].sfp_ctrl))
+			& PORT_HW_CFG_TX_LASER_MASK;
+
+	if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+
+		DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+		bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
+		tmp2 |= 0x1000;
+		tmp2 &= 0xFFEF;
+		bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+	}
+
 	return 0;
 }
 
@@ -5570,46 +5725,49 @@
 				      port_feature_config[params->port].
 				      config));
 	bnx2x_cl45_read(bp, phy,
-		      MDIO_PMA_DEVAD,
-		      MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
 	if (mod_abs & (1<<8)) {
 
 		/* Module is absent */
 		DP(NETIF_MSG_LINK, "MOD_ABS indication "
 			    "show module is absent\n");
 
-		/* 1. Set mod_abs to detect next module
-		presence event
-		   2. Set EDC off by setting OPTXLOS signal input to low
-			(bit 9).
-			When the EDC is off it locks onto a reference clock and
-			avoids becoming 'lost'.*/
+		/*
+		 * 1. Set mod_abs to detect next module
+		 *    presence event
+		 * 2. Set EDC off by setting OPTXLOS signal input to low
+		 *    (bit 9).
+		 *    When the EDC is off it locks onto a reference clock and
+		 *    avoids becoming 'lost'.
+		 */
 		mod_abs &= ~(1<<8);
 		if (!(phy->flags & FLAGS_NOC))
 			mod_abs &= ~(1<<9);
 		bnx2x_cl45_write(bp, phy,
-			       MDIO_PMA_DEVAD,
-			       MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-		/* Clear RX alarm since it stays up as long as
-		the mod_abs wasn't changed */
+		/*
+		 * Clear RX alarm since it stays up as long as
+		 * the mod_abs wasn't changed
+		 */
 		bnx2x_cl45_read(bp, phy,
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
 
 	} else {
 		/* Module is present */
 		DP(NETIF_MSG_LINK, "MOD_ABS indication "
 			    "show module is present\n");
-		/* First thing, disable transmitter,
-		and if the module is ok, the
-		module_detection will enable it*/
-
-		/* 1. Set mod_abs to detect next module
-		absent event ( bit 8)
-		   2. Restore the default polarity of the OPRXLOS signal and
-		this signal will then correctly indicate the presence or
-		absence of the Rx signal. (bit 9) */
+		/*
+		 * First disable the transmitter, and if the module is ok, the
+		 * module_detection will enable it
+		 * 1. Set mod_abs to detect next module absent event (bit 8)
+		 * 2. Restore the default polarity of the OPRXLOS signal and
+		 * this signal will then correctly indicate the presence or
+		 * absence of the Rx signal. (bit 9)
+		 */
 		mod_abs |= (1<<8);
 		if (!(phy->flags & FLAGS_NOC))
 			mod_abs |= (1<<9);
@@ -5617,10 +5775,12 @@
 				 MDIO_PMA_DEVAD,
 				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
 
-		/* Clear RX alarm since it stays up as long as
-		the mod_abs wasn't changed. This is need to be done
-		before calling the module detection, otherwise it will clear
-		the link update alarm */
+		/*
+		 * Clear RX alarm since it stays up as long as the mod_abs
+		 * wasn't changed. This needs to be done before calling the
+		 * module detection, otherwise it will clear the link update
+		 * alarm
+		 */
 		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_RX_ALARM, &rx_alarm_status);
@@ -5628,7 +5788,7 @@
 
 		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
 		    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
-			bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+			bnx2x_sfp_set_transmitter(params, phy, 0);
 
 		if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
 			bnx2x_sfp_module_detection(phy, params);
@@ -5637,9 +5797,8 @@
 	}
 
 	DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
-		 rx_alarm_status);
-	/* No need to check link status in case of
-	module plugged in/out */
+		   rx_alarm_status);
+	/* No need to check link status in case of module plugged in/out */
 }
 
 static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
@@ -5675,7 +5834,7 @@
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
 
-	/**
+	/*
 	 * If a module is present and there is need to check
 	 * for over current
 	 */
@@ -5695,12 +5854,8 @@
 					    " Please remove the SFP+ module and"
 					    " restart the system to clear this"
 					    " error.\n",
-				   params->port);
-
-			/*
-			 * Disable all RX_ALARMs except for
-			 * mod_abs
-			 */
+			 params->port);
+			/* Disable all RX_ALARMs except for mod_abs */
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_RX_ALARM_CTRL, (1<<5));
@@ -5743,11 +5898,15 @@
 			MDIO_PMA_DEVAD,
 			MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
 
-	/* Bits 0..2 --> speed detected,
-	   bits 13..15--> link is down */
+	/*
+	 * Bits 0..2 --> speed detected,
+	 * Bits 13..15 --> link is down
+	 */
 	if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
 		link_up = 1;
 		vars->line_speed = SPEED_10000;
+		DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+			   params->port);
 	} else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
 		link_up = 1;
 		vars->line_speed = SPEED_1000;
@@ -5758,15 +5917,18 @@
 		DP(NETIF_MSG_LINK, "port %x: External link is down\n",
 			   params->port);
 	}
-	if (link_up)
+	if (link_up) {
 		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
+		DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
+	}
 
 	if ((DUAL_MEDIA(params)) &&
 	    (phy->req_line_speed == SPEED_1000)) {
 		bnx2x_cl45_read(bp, phy,
 				MDIO_PMA_DEVAD,
 				MDIO_PMA_REG_8727_PCS_GP, &val1);
-		/**
+		/*
 		 * In case of dual-media board and 1G, power up the XAUI side,
 		 * otherwise power it down. For 10G it is done automatically
 		 */
@@ -5786,7 +5948,7 @@
 {
 	struct bnx2x *bp = params->bp;
 	/* Disable Transmitter */
-	bnx2x_sfp_set_transmitter(bp, phy, params->port, 0);
+	bnx2x_sfp_set_transmitter(params, phy, 0);
 	/* Clear LASI */
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0);
 
@@ -5798,19 +5960,23 @@
 static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 					   struct link_params *params)
 {
-	u16 val, fw_ver1, fw_ver2, cnt;
+	u16 val, fw_ver1, fw_ver2, cnt, adj;
 	struct bnx2x *bp = params->bp;
 
+	adj = 0;
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		adj = -1;
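+	/* The MDIO2ARM registers below sit one address lower on the 84833 */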
+
 	/* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
 	/* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0014);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, 0x0000);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, 0x0300);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x0009);
 
 	for (cnt = 0; cnt < 100; cnt++) {
-		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
 		if (val & 1)
 			break;
 		udelay(5);
@@ -5824,11 +5990,11 @@
 
 
 	/* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819 + adj, 0x0000);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A + adj, 0xc200);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817 + adj, 0x000A);
 	for (cnt = 0; cnt < 100; cnt++) {
-		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818 + adj, &val);
 		if (val & 1)
 			break;
 		udelay(5);
@@ -5841,9 +6007,9 @@
 	}
 
 	/* lower 16 bits of the register SPI_FW_STATUS */
-	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B + adj, &fw_ver1);
 	/* upper 16 bits of register SPI_FW_STATUS */
-	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C + adj, &fw_ver2);
 
 	bnx2x_save_spirom_version(bp, params->port, (fw_ver2<<16) | fw_ver1,
 				  phy->ver_addr);
@@ -5852,33 +6018,53 @@
 static void bnx2x_848xx_set_led(struct bnx2x *bp,
 				struct bnx2x_phy *phy)
 {
-	u16 val;
+	u16 val, adj;
+
+	adj = 0;
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		adj = -1;
 
 	/* PHYC_CTL_LED_CTL */
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD,
-			MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+			MDIO_PMA_REG_8481_LINK_SIGNAL + adj, &val);
 	val &= 0xFE00;
 	val |= 0x0092;
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+			 MDIO_PMA_REG_8481_LINK_SIGNAL + adj, val);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED1_MASK,
+			 MDIO_PMA_REG_8481_LED1_MASK + adj,
 			 0x80);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED2_MASK,
+			 MDIO_PMA_REG_8481_LED2_MASK + adj,
 			 0x18);
 
+	/* Select activity source by Tx and Rx, as suggested by PHY AE */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8481_LED3_MASK + adj,
+			 0x0006);
+
+	/* Select the closest activity blink rate to that in 10/100/1000 */
+	bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8481_LED3_BLINK + adj,
+			0);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, &val);
+	val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
+
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
-			 MDIO_PMA_REG_8481_LED3_MASK,
-			 0x0040);
+			 MDIO_PMA_REG_84823_CTL_LED_CTL_1 + adj, val);
 
 	/* 'Interrupt Mask' */
 	bnx2x_cl45_write(bp, phy,
@@ -5892,7 +6078,11 @@
 {
 	struct bnx2x *bp = params->bp;
 	u16 autoneg_val, an_1000_val, an_10_100_val;
-
+	/*
+	 * This phy uses the NIG latch mechanism since link indication
+	 * arrives through its LED4 and not via its LASI signal, so we
+	 * get a steady signal instead of clear on read
+	 */
 	bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
 		      1 << NIG_LATCH_BC_ENABLE_MI_INT);
 
@@ -6017,11 +6207,11 @@
 	struct bnx2x *bp = params->bp;
 	/* Restore normal power mode*/
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-			    MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 
 	/* HW reset */
 	bnx2x_ext_phy_hw_reset(bp, params->port);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
 	return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -6033,12 +6223,15 @@
 {
 	struct bnx2x *bp = params->bp;
 	u8 port, initialize = 1;
-	u16 val;
+	u16 val, adj;
 	u16 temp;
-	u32 actual_phy_selection;
+	u32 actual_phy_selection, cms_enable;
 	u8 rc = 0;
 
 	/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
+	adj = 0;
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		adj = 3;
 
 	msleep(1);
 	if (CHIP_IS_E2(bp))
@@ -6048,11 +6241,12 @@
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
 		       port);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 	/* Wait for GPHY to come out of reset */
 	msleep(50);
-	/* BCM84823 requires that XGXS links up first @ 10G for normal
-	behavior */
+	/*
+	 * BCM84823 requires that XGXS links up first @ 10G for normal behavior
+	 */
 	temp = vars->line_speed;
 	vars->line_speed = SPEED_10000;
 	bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
@@ -6062,7 +6256,7 @@
 	/* Set dual-media configuration according to configuration */
 
 	bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-			MDIO_CTL_REG_84823_MEDIA, &val);
+			MDIO_CTL_REG_84823_MEDIA + adj, &val);
 	val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
 		 MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
 		 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
@@ -6095,7 +6289,7 @@
 		val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
 
 	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-			 MDIO_CTL_REG_84823_MEDIA, val);
+			 MDIO_CTL_REG_84823_MEDIA + adj, val);
 	DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
 		   params->multi_phy_config, val);
 
@@ -6103,29 +6297,50 @@
 		rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
 	else
 		bnx2x_save_848xx_spirom_version(phy, params);
+	cms_enable = REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].default_cfg)) &
+			PORT_HW_CFG_ENABLE_CMS_MASK;
+
+	bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+		MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
+	if (cms_enable)
+		val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
+	else
+		val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+		MDIO_CTL_REG_84823_USER_CTRL_REG, val);
+
+
 	return rc;
 }
 
 static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
-				       struct link_params *params,
-				       struct link_vars *vars)
+				  struct link_params *params,
+				  struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u16 val, val1, val2;
+	u16 val, val1, val2, adj;
 	u8 link_up = 0;
 
+	/* Reg offset adjustment for 84833 */
+	adj = 0;
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		adj = -1;
+
 	/* Check 10G-BaseT link status */
 	/* Check PMD signal ok */
 	bnx2x_cl45_read(bp, phy,
 			MDIO_AN_DEVAD, 0xFFFA, &val1);
 	bnx2x_cl45_read(bp, phy,
-			MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL + adj,
 			&val2);
 	DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
 
 	/* Check link 10G */
 	if (val2 & (1<<11)) {
 		vars->line_speed = SPEED_10000;
+		vars->duplex = DUPLEX_FULL;
 		link_up = 1;
 		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
 	} else { /* Check Legacy speed link */
@@ -6203,9 +6418,9 @@
 				struct link_params *params)
 {
 	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
-			    MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
 	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
-			    MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
 }
 
 static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
@@ -6227,8 +6442,8 @@
 	else
 		port = params->port;
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
-			    MISC_REGISTERS_GPIO_OUTPUT_LOW,
-			    port);
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
+		       port);
 }
 
 static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
@@ -6283,24 +6498,24 @@
 
 			/* Set LED masks */
 			bnx2x_cl45_write(bp, phy,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_8481_LED1_MASK,
-					0x0);
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x0);
 
 			bnx2x_cl45_write(bp, phy,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_8481_LED2_MASK,
-					0x0);
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED2_MASK,
+					 0x0);
 
 			bnx2x_cl45_write(bp, phy,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_8481_LED3_MASK,
-					0x0);
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED3_MASK,
+					 0x0);
 
 			bnx2x_cl45_write(bp, phy,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_8481_LED5_MASK,
-					0x20);
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED5_MASK,
+					 0x20);
 
 		} else {
 			bnx2x_cl45_write(bp, phy,
@@ -6324,35 +6539,35 @@
 			val |= 0x2492;
 
 			bnx2x_cl45_write(bp, phy,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_8481_LINK_SIGNAL,
-					val);
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LINK_SIGNAL,
+					 val);
 
 			/* Set LED masks */
 			bnx2x_cl45_write(bp, phy,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_8481_LED1_MASK,
-					0x0);
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x0);
 
 			bnx2x_cl45_write(bp, phy,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_8481_LED2_MASK,
-					0x20);
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED2_MASK,
+					 0x20);
 
 			bnx2x_cl45_write(bp, phy,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_8481_LED3_MASK,
-					0x20);
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED3_MASK,
+					 0x20);
 
 			bnx2x_cl45_write(bp, phy,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_8481_LED5_MASK,
-					0x0);
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED5_MASK,
+					 0x0);
 		} else {
 			bnx2x_cl45_write(bp, phy,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_8481_LED1_MASK,
-					0x20);
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x20);
 		}
 		break;
 
@@ -6370,9 +6585,9 @@
 					&val);
 
 			if (!((val &
-			      MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
-			   >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)){
-				DP(NETIF_MSG_LINK, "Seting LINK_SIGNAL\n");
+			       MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
+			  >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
+				DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
 				bnx2x_cl45_write(bp, phy,
 						 MDIO_PMA_DEVAD,
 						 MDIO_PMA_REG_8481_LINK_SIGNAL,
@@ -6381,30 +6596,42 @@
 
 			/* Set LED masks */
 			bnx2x_cl45_write(bp, phy,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_8481_LED1_MASK,
-					0x10);
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x10);
 
 			bnx2x_cl45_write(bp, phy,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_8481_LED2_MASK,
-					0x80);
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED2_MASK,
+					 0x80);
 
 			bnx2x_cl45_write(bp, phy,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_8481_LED3_MASK,
-					0x98);
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED3_MASK,
+					 0x98);
 
 			bnx2x_cl45_write(bp, phy,
-					MDIO_PMA_DEVAD,
-					MDIO_PMA_REG_8481_LED5_MASK,
-					0x40);
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED5_MASK,
+					 0x40);
 
 		} else {
 			bnx2x_cl45_write(bp, phy,
 					 MDIO_PMA_DEVAD,
 					 MDIO_PMA_REG_8481_LED1_MASK,
 					 0x80);
+
+			/* Tell LED3 to blink on source */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LINK_SIGNAL,
+					&val);
+			val &= ~(7<<6);
+			val |= (1<<6); /* A83B[8:6]= 1 */
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LINK_SIGNAL,
+					 val);
 		}
 		break;
 	}
@@ -6431,10 +6658,10 @@
 
 	/* Restore normal power mode*/
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-			    MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
 	/* HW reset */
 	bnx2x_ext_phy_hw_reset(bp, params->port);
-	bnx2x_wait_reset_complete(bp, phy);
+	bnx2x_wait_reset_complete(bp, phy, params);
 
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 0x1);
@@ -6481,14 +6708,13 @@
 	DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
 		   val2, val1);
 	link_up = ((val1 & 4) == 4);
-	/* if link is up
-	 * print the AN outcome of the SFX7101 PHY
-	 */
+	/* if link is up print the AN outcome of the SFX7101 PHY */
 	if (link_up) {
 		bnx2x_cl45_read(bp, phy,
 				MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
 				&val2);
 		vars->line_speed = SPEED_10000;
+		vars->duplex = DUPLEX_FULL;
 		DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
 			   val2, (val2 & (1<<14)));
 		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
@@ -6516,20 +6742,20 @@
 	u16 val, cnt;
 
 	bnx2x_cl45_read(bp, phy,
-		      MDIO_PMA_DEVAD,
-		      MDIO_PMA_REG_7101_RESET, &val);
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_7101_RESET, &val);
 
 	for (cnt = 0; cnt < 10; cnt++) {
 		msleep(50);
 		/* Writes a self-clearing reset */
 		bnx2x_cl45_write(bp, phy,
-			       MDIO_PMA_DEVAD,
-			       MDIO_PMA_REG_7101_RESET,
-			       (val | (1<<15)));
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_7101_RESET,
+				 (val | (1<<15)));
 		/* Wait for clear */
 		bnx2x_cl45_read(bp, phy,
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_7101_RESET, &val);
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_7101_RESET, &val);
 
 		if ((val & (1<<15)) == 0)
 			break;
@@ -6540,10 +6766,10 @@
 				struct link_params *params) {
 	/* Low power mode is controlled by GPIO 2 */
 	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
-			    MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
 	/* The PHY reset is controlled by GPIO 1 */
 	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
-			    MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
 }
 
 static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
@@ -6585,9 +6811,9 @@
 	.supported	= 0,
 	.media_type	= ETH_PHY_NOT_PRESENT,
 	.ver_addr	= 0,
-	.req_flow_ctrl  = 0,
-	.req_line_speed = 0,
-	.speed_cap_mask = 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
 	.req_duplex	= 0,
 	.rsrv		= 0,
 	.config_init	= (config_init_t)NULL,
@@ -6622,8 +6848,8 @@
 	.media_type	= ETH_PHY_UNSPECIFIED,
 	.ver_addr	= 0,
 	.req_flow_ctrl	= 0,
-	.req_line_speed = 0,
-	.speed_cap_mask = 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
 	.req_duplex	= 0,
 	.rsrv		= 0,
 	.config_init	= (config_init_t)bnx2x_init_serdes,
@@ -6659,8 +6885,8 @@
 	.media_type	= ETH_PHY_UNSPECIFIED,
 	.ver_addr	= 0,
 	.req_flow_ctrl	= 0,
-	.req_line_speed = 0,
-	.speed_cap_mask = 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
 	.req_duplex	= 0,
 	.rsrv		= 0,
 	.config_init	= (config_init_t)bnx2x_init_xgxs,
@@ -6690,8 +6916,8 @@
 	.media_type	= ETH_PHY_BASE_T,
 	.ver_addr	= 0,
 	.req_flow_ctrl	= 0,
-	.req_line_speed = 0,
-	.speed_cap_mask = 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
 	.req_duplex	= 0,
 	.rsrv		= 0,
 	.config_init	= (config_init_t)bnx2x_7101_config_init,
@@ -6721,9 +6947,9 @@
 			   SUPPORTED_Asym_Pause),
 	.media_type	= ETH_PHY_UNSPECIFIED,
 	.ver_addr	= 0,
-	.req_flow_ctrl  = 0,
-	.req_line_speed = 0,
-	.speed_cap_mask = 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
 	.req_duplex	= 0,
 	.rsrv		= 0,
 	.config_init	= (config_init_t)bnx2x_8073_config_init,
@@ -6932,6 +7158,43 @@
 	.phy_specific_func = (phy_specific_func_t)NULL
 };
 
+static struct bnx2x_phy phy_84833 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
+	.addr		= 0xff,
+	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
+			    FLAGS_REARM_LATCH_SIGNAL,
+	.def_md_devad	= 0,
+	.reserved	= 0,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10baseT_Half |
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_848x3_config_init,
+	.read_status	= (read_status_t)bnx2x_848xx_read_status,
+	.link_reset	= (link_reset_t)bnx2x_848x3_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_848xx_format_ver,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)bnx2x_848xx_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
 /*****************************************************************/
 /*                                                               */
 /* Populate the phy according to the phy index. Main function: bnx2x_populate_phy */
@@ -6945,7 +7208,7 @@
 	/* Get the 4 lanes xgxs config rx and tx */
 	u32 rx = 0, tx = 0, i;
 	for (i = 0; i < 2; i++) {
-		/**
+		/*
 		 * INT_PHY and EXT_PHY1 share the same value location in the
 		 * shmem. When num_phys is greater than 1, then this value
 		 * applies only to EXT_PHY1
@@ -6953,19 +7216,19 @@
 		if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
 			rx = REG_RD(bp, shmem_base +
 				    offsetof(struct shmem_region,
-			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
+			  dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
 
 			tx = REG_RD(bp, shmem_base +
 				    offsetof(struct shmem_region,
-			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
+			  dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
 		} else {
 			rx = REG_RD(bp, shmem_base +
 				    offsetof(struct shmem_region,
-			  dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+			 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
 
 			tx = REG_RD(bp, shmem_base +
 				    offsetof(struct shmem_region,
-			  dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+			 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
 		}
 
 		phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
@@ -7085,6 +7348,9 @@
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
 		*phy = phy_84823;
 		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+		*phy = phy_84833;
+		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
 		*phy = phy_7101;
 		break;
@@ -7099,21 +7365,21 @@
 	phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
 	bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
 
-	/**
-	* The shmem address of the phy version is located on different
-	* structures. In case this structure is too old, do not set
-	* the address
-	*/
+	/*
+	 * The shmem address of the phy version is located on different
+	 * structures. In case this structure is too old, do not set
+	 * the address
+	 */
 	config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
 					dev_info.shared_hw_config.config2));
 	if (phy_index == EXT_PHY1) {
 		phy->ver_addr = shmem_base + offsetof(struct shmem_region,
 				port_mb[port].ext_phy_fw_version);
 
-	/* Check specific mdc mdio settings */
-	if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
-		mdc_mdio_access = config2 &
-		SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+		/* Check specific mdc mdio settings */
+		if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+			mdc_mdio_access = config2 &
+			SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
 	} else {
 		u32 size = REG_RD(bp, shmem2_base);
 
@@ -7132,7 +7398,7 @@
 	}
 	phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
 
-	/**
+	/*
 	 * In case mdc/mdio_access of the external phy is different than the
 	 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
 	 * to prevent one port interfere with another port's CL45 operations.
@@ -7167,18 +7433,20 @@
 	/* Populate the default phy configuration for MF mode */
 	if (phy_index == EXT_PHY2) {
 		link_config = REG_RD(bp, params->shmem_base +
-					 offsetof(struct shmem_region, dev_info.
+				     offsetof(struct shmem_region, dev_info.
 			port_feature_config[params->port].link_config2));
 		phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
-					offsetof(struct shmem_region, dev_info.
+					     offsetof(struct shmem_region,
+						      dev_info.
 			port_hw_config[params->port].speed_capability_mask2));
 	} else {
 		link_config = REG_RD(bp, params->shmem_base +
-				offsetof(struct shmem_region, dev_info.
+				     offsetof(struct shmem_region, dev_info.
 				port_feature_config[params->port].link_config));
 		phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
-				offsetof(struct shmem_region, dev_info.
-			   port_hw_config[params->port].speed_capability_mask));
+					     offsetof(struct shmem_region,
+						      dev_info.
+			port_hw_config[params->port].speed_capability_mask));
 	}
 	DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
 		       " 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
@@ -7325,7 +7593,7 @@
 			else if (phy_index == EXT_PHY2)
 				actual_phy_idx = EXT_PHY1;
 		}
-		params->phy[actual_phy_idx].req_flow_ctrl  =
+		params->phy[actual_phy_idx].req_flow_ctrl =
 			params->req_flow_ctrl[link_cfg_idx];
 
 		params->phy[actual_phy_idx].req_line_speed =
@@ -7378,57 +7646,6 @@
 	set_phy_vars(params);
 
 	DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
-	if (CHIP_REV_IS_FPGA(bp)) {
-
-		vars->link_up = 1;
-		vars->line_speed = SPEED_10000;
-		vars->duplex = DUPLEX_FULL;
-		vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-		vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
-		/* enable on E1.5 FPGA */
-		if (CHIP_IS_E1H(bp)) {
-			vars->flow_ctrl |=
-					(BNX2X_FLOW_CTRL_TX |
-					 BNX2X_FLOW_CTRL_RX);
-			vars->link_status |=
-					(LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
-					 LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
-		}
-
-		bnx2x_emac_enable(params, vars, 0);
-		if (!(CHIP_IS_E2(bp)))
-			bnx2x_pbf_update(params, vars->flow_ctrl,
-					 vars->line_speed);
-		/* disable drain */
-		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
-
-		/* update shared memory */
-		bnx2x_update_mng(params, vars->link_status);
-
-		return 0;
-
-	} else
-	if (CHIP_REV_IS_EMUL(bp)) {
-
-		vars->link_up = 1;
-		vars->line_speed = SPEED_10000;
-		vars->duplex = DUPLEX_FULL;
-		vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-		vars->link_status = (LINK_STATUS_LINK_UP | LINK_10GTFD);
-
-		bnx2x_bmac_enable(params, vars, 0);
-
-		bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed);
-		/* Disable drain */
-		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE
-				    + params->port*4, 0);
-
-		/* update shared memory */
-		bnx2x_update_mng(params, vars->link_status);
-
-		return 0;
-
-	} else
 	if (params->loopback_mode == LOOPBACK_BMAC) {
 
 		vars->link_up = 1;
@@ -7444,8 +7661,7 @@
 		/* set bmac loopback */
 		bnx2x_bmac_enable(params, vars, 1);
 
-		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
-		    params->port*4, 0);
+		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 
 	} else if (params->loopback_mode == LOOPBACK_EMAC) {
 
@@ -7461,8 +7677,7 @@
 		/* set bmac loopback */
 		bnx2x_emac_enable(params, vars, 1);
 		bnx2x_emac_program(params, vars);
-		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
-		    params->port*4, 0);
+		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 
 	} else if ((params->loopback_mode == LOOPBACK_XGXS) ||
 		   (params->loopback_mode == LOOPBACK_EXT_PHY)) {
@@ -7485,8 +7700,7 @@
 			bnx2x_emac_program(params, vars);
 			bnx2x_emac_enable(params, vars, 0);
 		} else
-		bnx2x_bmac_enable(params, vars, 0);
-
+			bnx2x_bmac_enable(params, vars, 0);
 		if (params->loopback_mode == LOOPBACK_XGXS) {
 			/* set 10G XGXS loopback */
 			params->phy[INT_PHY].config_loopback(
@@ -7504,9 +7718,7 @@
 						params);
 			}
 		}
-
-		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
-			    params->port*4, 0);
+		REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
 
 		bnx2x_set_led(params, vars,
 			      LED_MODE_OPER, vars->line_speed);
@@ -7525,7 +7737,7 @@
 	return 0;
 }
 u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
-		  u8 reset_ext_phy)
+		    u8 reset_ext_phy)
 {
 	struct bnx2x *bp = params->bp;
 	u8 phy_index, port = params->port, clear_latch_ind = 0;
@@ -7534,10 +7746,10 @@
 	vars->link_status = 0;
 	bnx2x_update_mng(params, vars->link_status);
 	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
-		     (NIG_MASK_XGXS0_LINK_STATUS |
-		      NIG_MASK_XGXS0_LINK10G |
-		      NIG_MASK_SERDES0_LINK_STATUS |
-		      NIG_MASK_MI_INT));
+		       (NIG_MASK_XGXS0_LINK_STATUS |
+			NIG_MASK_XGXS0_LINK10G |
+			NIG_MASK_SERDES0_LINK_STATUS |
+			NIG_MASK_MI_INT));
 
 	/* activate nig drain */
 	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
@@ -7605,10 +7817,13 @@
 	struct bnx2x_phy phy[PORT_MAX];
 	struct bnx2x_phy *phy_blk[PORT_MAX];
 	u16 val;
-	s8 port;
+	s8 port = 0;
 	s8 port_of_path = 0;
-
-	bnx2x_ext_phy_hw_reset(bp, 0);
+	u32 swap_val, swap_override;
+	swap_val = REG_RD(bp,  NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp,  NIG_REG_STRAP_OVERRIDE);
+	port ^= (swap_val && swap_override);
+	bnx2x_ext_phy_hw_reset(bp, port);
 	/* PART1 - Reset both phys */
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
 		u32 shmem_base, shmem2_base;
@@ -7633,21 +7848,22 @@
 		/* disable attentions */
 		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
 			       port_of_path*4,
-			     (NIG_MASK_XGXS0_LINK_STATUS |
-			      NIG_MASK_XGXS0_LINK10G |
-			      NIG_MASK_SERDES0_LINK_STATUS |
-			      NIG_MASK_MI_INT));
+			       (NIG_MASK_XGXS0_LINK_STATUS |
+				NIG_MASK_XGXS0_LINK10G |
+				NIG_MASK_SERDES0_LINK_STATUS |
+				NIG_MASK_MI_INT));
 
 		/* Need to take the phy out of low power mode in order
 			to access its registers */
 		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-				  MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+			       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+			       port);
 
 		/* Reset the phy */
 		bnx2x_cl45_write(bp, &phy[port],
-			       MDIO_PMA_DEVAD,
-			       MDIO_PMA_REG_CTRL,
-			       1<<15);
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_CTRL,
+				 1<<15);
 	}
 
 	/* Add delay of 150ms after reset */
@@ -7663,7 +7879,6 @@
 
 	/* PART2 - Download firmware to both phys */
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
-		u16 fw_ver1;
 		if (CHIP_IS_E2(bp))
 			port_of_path = 0;
 		else
@@ -7671,34 +7886,26 @@
 
 		DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
 			   phy_blk[port]->addr);
-		bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
-						  port_of_path);
-
-		bnx2x_cl45_read(bp, phy_blk[port],
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_ROM_VER1, &fw_ver1);
-		if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
-			DP(NETIF_MSG_LINK,
-				 "bnx2x_8073_common_init_phy port %x:"
-				 "Download failed. fw version = 0x%x\n",
-				 port, fw_ver1);
+		if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+						      port_of_path))
 			return -EINVAL;
-		}
 
 		/* Only set bit 10 = 1 (Tx power down) */
 		bnx2x_cl45_read(bp, phy_blk[port],
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_TX_POWER_DOWN, &val);
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_TX_POWER_DOWN, &val);
 
 		/* Phase1 of TX_POWER_DOWN reset */
 		bnx2x_cl45_write(bp, phy_blk[port],
-			       MDIO_PMA_DEVAD,
-			       MDIO_PMA_REG_TX_POWER_DOWN,
-			       (val | 1<<10));
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_TX_POWER_DOWN,
+				 (val | 1<<10));
 	}
 
-	/* Toggle Transmitter: Power down and then up with 600ms
-	   delay between */
+	/*
+	 * Toggle Transmitter: Power down and then up, with a 600ms delay
+	 * in between
+	 */
 	msleep(600);
 
 	/* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
@@ -7706,25 +7913,25 @@
 		/* Phase2 of POWER_DOWN_RESET */
 		/* Release bit 10 (Release Tx power down) */
 		bnx2x_cl45_read(bp, phy_blk[port],
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_TX_POWER_DOWN, &val);
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_TX_POWER_DOWN, &val);
 
 		bnx2x_cl45_write(bp, phy_blk[port],
-			       MDIO_PMA_DEVAD,
-			       MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
 		msleep(15);
 
 		/* Read modify write the SPI-ROM version select register */
 		bnx2x_cl45_read(bp, phy_blk[port],
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_EDC_FFE_MAIN, &val);
 		bnx2x_cl45_write(bp, phy_blk[port],
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
 
 		/* set GPIO2 back to LOW */
 		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
-				  MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+			       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 	}
 	return 0;
 }
@@ -7771,32 +7978,90 @@
 
 		/* Set fault module detected LED on */
 		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
-				  MISC_REGISTERS_GPIO_HIGH,
-				  port);
+			       MISC_REGISTERS_GPIO_HIGH,
+			       port);
 	}
 
 	return 0;
 }
+static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
+					 u8 *io_gpio, u8 *io_port)
+{
+
+	u32 phy_gpio_reset = REG_RD(bp, shmem_base +
+					  offsetof(struct shmem_region,
+				dev_info.port_hw_config[PORT_0].default_cfg));
+	switch (phy_gpio_reset) {
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
+		*io_gpio = 0;
+		*io_port = 0;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
+		*io_gpio = 1;
+		*io_port = 0;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
+		*io_gpio = 2;
+		*io_port = 0;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
+		*io_gpio = 3;
+		*io_port = 0;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
+		*io_gpio = 0;
+		*io_port = 1;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
+		*io_gpio = 1;
+		*io_port = 1;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
+		*io_gpio = 2;
+		*io_port = 1;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
+		*io_gpio = 3;
+		*io_port = 1;
+		break;
+	default:
+		/* Don't override the io_gpio and io_port */
+		break;
+	}
+}
 static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
 				     u32 shmem_base_path[],
 				     u32 shmem2_base_path[], u8 phy_index,
 				     u32 chip_id)
 {
-	s8 port;
+	s8 port, reset_gpio;
 	u32 swap_val, swap_override;
 	struct bnx2x_phy phy[PORT_MAX];
 	struct bnx2x_phy *phy_blk[PORT_MAX];
 	s8 port_of_path;
-	swap_val = REG_RD(bp,  NIG_REG_PORT_SWAP);
-	swap_override = REG_RD(bp,  NIG_REG_STRAP_OVERRIDE);
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
 
+	reset_gpio = MISC_REGISTERS_GPIO_1;
 	port = 1;
 
-	bnx2x_ext_phy_hw_reset(bp, port ^ (swap_val && swap_override));
+	/*
+	 * Retrieve the reset gpio/port which control the reset.
+	 * Default is GPIO1, PORT1
+	 */
+	bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
+				     (u8 *)&reset_gpio, (u8 *)&port);
 
 	/* Calculate the port based on port swap */
 	port ^= (swap_val && swap_override);
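/*
 * Illustrative sketch (not part of the patch): the port-swap correction
 * above deliberately uses the logical && so the XORed term is always 0 or
 * 1; the port index is flipped only when both the swap strap and its
 * override register read non-zero. A standalone rendering:
 */
#include <assert.h>

static int effective_port(int port, unsigned int swap_val,
			  unsigned int swap_override)
{
	/* logical && (not bitwise &) collapses the register values to 0/1 */
	return port ^ (swap_val && swap_override);
}

int main(void)
{
	assert(effective_port(1, 0x1, 0x1) == 0);	/* both set: swapped */
	assert(effective_port(1, 0x1, 0x0) == 1);	/* override clear: unchanged */
	assert(effective_port(1, 0x4, 0x2) == 0);	/* any non-zero pair swaps */
	return 0;
}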
 
+	/* Initiate PHY reset */
+	bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
+		       port);
+	msleep(1);
+	bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+		       port);
+
 	msleep(5);
 
 	/* PART1 - Reset both phys */
@@ -7832,9 +8097,7 @@
 
 		/* Reset the phy */
 		bnx2x_cl45_write(bp, &phy[port],
-			       MDIO_PMA_DEVAD,
-			       MDIO_PMA_REG_CTRL,
-			       1<<15);
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
 	}
 
 	/* Add delay of 150ms after reset */
@@ -7848,27 +8111,17 @@
 	}
 	/* PART2 - Download firmware to both phys */
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
-		u16 fw_ver1;
-		 if (CHIP_IS_E2(bp))
+		if (CHIP_IS_E2(bp))
 			port_of_path = 0;
 		else
 			port_of_path = port;
 		DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
 			   phy_blk[port]->addr);
-		bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
-						  port_of_path);
-		bnx2x_cl45_read(bp, phy_blk[port],
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_ROM_VER1, &fw_ver1);
-		if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
-			DP(NETIF_MSG_LINK,
-				 "bnx2x_8727_common_init_phy port %x:"
-				 "Download failed. fw version = 0x%x\n",
-				 port, fw_ver1);
+		if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+						      port_of_path))
 			return -EINVAL;
-		}
-	}
 
+	}
 	return 0;
 }
 
@@ -7893,8 +8146,10 @@
 		break;
 
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-		/* GPIO1 affects both ports, so there's need to pull
-		it for single port alone */
+		/*
+		 * GPIO1 affects both ports, so there's a need to pull
+		 * it for a single port alone
+		 */
 		rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
 						shmem2_base_path,
 						phy_index, chip_id);
@@ -7904,11 +8159,15 @@
 		break;
 	default:
 		DP(NETIF_MSG_LINK,
-			 "bnx2x_common_init_phy: ext_phy 0x%x not required\n",
-			 ext_phy_type);
+			   "ext_phy 0x%x common init not required\n",
+			   ext_phy_type);
 		break;
 	}
 
+	if (rc != 0)
+		netdev_err(bp->dev, "Warning: PHY was not initialized,"
+			   " Port %d\n", 0);
 	return rc;
 }
 
@@ -7916,12 +8175,20 @@
 			 u32 shmem2_base_path[], u32 chip_id)
 {
 	u8 rc = 0;
+	u32 phy_ver;
 	u8 phy_index;
 	u32 ext_phy_type, ext_phy_config;
 	DP(NETIF_MSG_LINK, "Begin common phy init\n");
 
-	if (CHIP_REV_IS_EMUL(bp))
+	/* Check if common init was already done */
+	phy_ver = REG_RD(bp, shmem_base_path[0] +
+			 offsetof(struct shmem_region,
+				  port_mb[PORT_0].ext_phy_fw_version));
+	if (phy_ver) {
+		DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
+			       phy_ver);
 		return 0;
+	}
 
 	/* Read the ext_phy_type for arbitrary port(0) */
 	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index bedab1a..92f36b6 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2010 Broadcom Corporation
+/* Copyright 2008-2011 Broadcom Corporation
  *
  * Unless you and Broadcom execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
@@ -33,7 +33,7 @@
 #define BNX2X_FLOW_CTRL_BOTH		PORT_FEATURE_FLOW_CONTROL_BOTH
 #define BNX2X_FLOW_CTRL_NONE		PORT_FEATURE_FLOW_CONTROL_NONE
 
-#define SPEED_AUTO_NEG	    0
+#define SPEED_AUTO_NEG		0
 #define SPEED_12000		12000
 #define SPEED_12500		12500
 #define SPEED_13000		13000
@@ -44,8 +44,8 @@
 #define SFP_EEPROM_VENDOR_NAME_SIZE		16
 #define SFP_EEPROM_VENDOR_OUI_ADDR		0x25
 #define SFP_EEPROM_VENDOR_OUI_SIZE		3
-#define SFP_EEPROM_PART_NO_ADDR 		0x28
-#define SFP_EEPROM_PART_NO_SIZE		16
+#define SFP_EEPROM_PART_NO_ADDR			0x28
+#define SFP_EEPROM_PART_NO_SIZE			16
 #define PWR_FLT_ERR_MSG_LEN			250
 
 #define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -62,7 +62,7 @@
 #define SINGLE_MEDIA(params)		(params->num_phys == 2)
 /* Dual Media board contains two external phy with different media */
 #define DUAL_MEDIA(params)		(params->num_phys == 3)
-#define FW_PARAM_MDIO_CTRL_OFFSET 16
+#define FW_PARAM_MDIO_CTRL_OFFSET		16
 #define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
 	(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
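/*
 * Illustrative sketch (not part of the patch): FW_PARAM_SET() packs the PHY
 * address, PHY type and the MDC/MDIO access selector into a single u32, with
 * the access selector shifted into the upper half by
 * FW_PARAM_MDIO_CTRL_OFFSET (16). The sample field values below are made up
 * purely for the example:
 */
#include <assert.h>
#include <stdint.h>

#define FW_PARAM_MDIO_CTRL_OFFSET	16
#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
	((phy_addr) | (phy_type) | ((mdio_access) << FW_PARAM_MDIO_CTRL_OFFSET))

int main(void)
{
	/* addr 0x01, type 0x0100, access selector 0x2 -> 0x00020101 */
	uint32_t fw_param = FW_PARAM_SET(0x01u, 0x0100u, 0x2u);

	assert(fw_param == 0x00020101u);
	return 0;
}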
 
@@ -201,12 +201,14 @@
 
 	/* Default / User Configuration */
 	u8 loopback_mode;
-#define LOOPBACK_NONE	0
-#define LOOPBACK_EMAC	1
-#define LOOPBACK_BMAC	2
+#define LOOPBACK_NONE		0
+#define LOOPBACK_EMAC		1
+#define LOOPBACK_BMAC		2
 #define LOOPBACK_XGXS		3
 #define LOOPBACK_EXT_PHY	4
-#define LOOPBACK_EXT 	5
+#define LOOPBACK_EXT		5
+#define LOOPBACK_UMAC		6
+#define LOOPBACK_XMAC		7
 
 	/* Device parameters */
 	u8 mac_addr[6];
@@ -230,10 +232,11 @@
 	/* Phy register parameter */
 	u32 chip_id;
 
+	/* features */
 	u32 feature_config_flags;
-#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
-#define FEATURE_CONFIG_PFC_ENABLED		(1<<1)
-#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY	(1<<2)
+#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED	(1<<0)
+#define FEATURE_CONFIG_PFC_ENABLED			(1<<1)
+#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY		(1<<2)
 #define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY	(1<<3)
 	/* Will be populated during common init */
 	struct bnx2x_phy phy[MAX_PHYS];
@@ -334,6 +337,11 @@
 /* Reset the external of SFX7101 */
 void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
 
+/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
+u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+				struct link_params *params, u16 addr,
+				u8 byte_cnt, u8 *o_buf);
+
 void bnx2x_hw_reset_phy(struct link_params *params);
 
 /* Checks if HW lock is required for this phy/board type */
@@ -379,7 +387,7 @@
 
 /* Used to configure the ETS to BW limited */
 void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
-						const u32 cos1_bw);
+			const u32 cos1_bw);
 
 /* Used to configure the ETS to strict */
 u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 8cdcf5b..6c7745e 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -586,7 +586,7 @@
 	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
 
 	/* lock the dmae channel */
-	mutex_lock(&bp->dmae_mutex);
+	spin_lock_bh(&bp->dmae_lock);
 
 	/* reset completion */
 	*wb_comp = 0;
@@ -617,7 +617,7 @@
 	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
 
 unlock:
-	mutex_unlock(&bp->dmae_mutex);
+	spin_unlock_bh(&bp->dmae_lock);
 	return rc;
 }
 
@@ -1397,7 +1397,7 @@
 	}
 
 	smp_mb__before_atomic_inc();
-	atomic_inc(&bp->spq_left);
+	atomic_inc(&bp->cq_spq_left);
 	/* push the change in fp->state and towards the memory */
 	smp_wmb();
 
@@ -2301,15 +2301,10 @@
 		/* accept matched ucast */
 		drop_all_ucast = 0;
 	}
-	if (filters & BNX2X_ACCEPT_MULTICAST) {
+	if (filters & BNX2X_ACCEPT_MULTICAST)
 		/* accept matched mcast */
 		drop_all_mcast = 0;
-		if (IS_MF_SI(bp))
-			/* since mcast addresses won't arrive with ovlan,
-			 * fw needs to accept all of them in
-			 * switch-independent mode */
-			accp_all_mcast = 1;
-	}
+
 	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
 		/* accept all mcast */
 		drop_all_ucast = 0;
@@ -2478,8 +2473,14 @@
 	rxq_init->sge_map = fp->rx_sge_mapping;
 	rxq_init->rcq_map = fp->rx_comp_mapping;
 	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
-	rxq_init->mtu = bp->dev->mtu;
-	rxq_init->buf_sz = bp->rx_buf_size;
+
+	/* Always use mini-jumbo MTU for FCoE L2 ring */
+	if (IS_FCOE_FP(fp))
+		rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+	else
+		rxq_init->mtu = bp->dev->mtu;
+
+	rxq_init->buf_sz = fp->rx_buf_size;
 	rxq_init->cl_qzone_id = fp->cl_qzone_id;
 	rxq_init->cl_id = fp->cl_id;
 	rxq_init->spcl_id = fp->cl_id;
@@ -2731,11 +2732,18 @@
 
 	spin_lock_bh(&bp->spq_lock);
 
-	if (!atomic_read(&bp->spq_left)) {
-		BNX2X_ERR("BUG! SPQ ring full!\n");
-		spin_unlock_bh(&bp->spq_lock);
-		bnx2x_panic();
-		return -EBUSY;
+	if (common) {
+		if (!atomic_read(&bp->eq_spq_left)) {
+			BNX2X_ERR("BUG! EQ ring full!\n");
+			spin_unlock_bh(&bp->spq_lock);
+			bnx2x_panic();
+			return -EBUSY;
+		}
+	} else if (!atomic_read(&bp->cq_spq_left)) {
+			BNX2X_ERR("BUG! SPQ ring full!\n");
+			spin_unlock_bh(&bp->spq_lock);
+			bnx2x_panic();
+			return -EBUSY;
 	}
 
 	spe = bnx2x_sp_get_next(bp);
@@ -2766,20 +2774,26 @@
 	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
 
 	/* stats ramrod has it's own slot on the spq */
-	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
+	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
 		/* It's ok if the actual decrement is issued towards the memory
 		 * somewhere between the spin_lock and spin_unlock. Thus no
 		 * more explict memory barrier is needed.
 		 */
-		atomic_dec(&bp->spq_left);
+		if (common)
+			atomic_dec(&bp->eq_spq_left);
+		else
+			atomic_dec(&bp->cq_spq_left);
+	}
+
 
 	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
 	   "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x) "
-	   "type(0x%x) left %x\n",
+	   "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
 	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
 	   (u32)(U64_LO(bp->spq_mapping) +
 	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
-	   HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
+	   HW_CID(bp, cid), data_hi, data_lo, type,
+	   atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
 
 	bnx2x_sp_prod_update(bp);
 	spin_unlock_bh(&bp->spq_lock);
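/*
 * Illustrative sketch (not part of the patch): with this change the slow
 * path keeps two separate credit counters, cq_spq_left for ETH ramrods
 * completed through the CQ and eq_spq_left for COMMON ramrods completed
 * through the event queue, and a post only proceeds when the matching
 * counter is non-zero. A minimal user-space rendering of that accounting:
 */
#include <assert.h>
#include <stdbool.h>

struct spq_credits {
	int cq_spq_left;	/* ETH ramrod credits */
	int eq_spq_left;	/* COMMON ramrod credits */
};

static bool try_post(struct spq_credits *c, bool common)
{
	int *left = common ? &c->eq_spq_left : &c->cq_spq_left;

	if (*left == 0)
		return false;	/* ring full for this class of ramrods */
	(*left)--;
	return true;
}

int main(void)
{
	struct spq_credits c = { .cq_spq_left = 1, .eq_spq_left = 0 };

	assert(try_post(&c, false));	/* one ETH credit available */
	assert(!try_post(&c, false));	/* ETH credits now exhausted */
	assert(!try_post(&c, true));	/* COMMON credits were already zero */
	return 0;
}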
@@ -3691,8 +3705,8 @@
 	sw_cons = bp->eq_cons;
 	sw_prod = bp->eq_prod;
 
-	DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->spq_left %u\n",
-			hw_cons, sw_cons, atomic_read(&bp->spq_left));
+	DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->cq_spq_left %u\n",
+			hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
 
 	for (; sw_cons != hw_cons;
 	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
@@ -3757,13 +3771,15 @@
 		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
 		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
 			DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
-			bp->set_mac_pending = 0;
+			if (elem->message.data.set_mac_event.echo)
+				bp->set_mac_pending = 0;
 			break;
 
 		case (EVENT_RING_OPCODE_SET_MAC |
 		      BNX2X_STATE_CLOSING_WAIT4_HALT):
 			DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
-			bp->set_mac_pending = 0;
+			if (elem->message.data.set_mac_event.echo)
+				bp->set_mac_pending = 0;
 			break;
 		default:
 			/* unknown event log error and continue */
@@ -3775,7 +3791,7 @@
 	} /* for */
 
 	smp_mb__before_atomic_inc();
-	atomic_add(spqe_cnt, &bp->spq_left);
+	atomic_add(spqe_cnt, &bp->eq_spq_left);
 
 	bp->eq_cons = sw_cons;
 	bp->eq_prod = sw_prod;
@@ -4208,7 +4224,7 @@
 static void bnx2x_init_sp_ring(struct bnx2x *bp)
 {
 	spin_lock_init(&bp->spq_lock);
-	atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
+	atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
 
 	bp->spq_prod_idx = 0;
 	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
@@ -4233,9 +4249,12 @@
 	bp->eq_cons = 0;
 	bp->eq_prod = NUM_EQ_DESC;
 	bp->eq_cons_sb = BNX2X_EQ_INDEX;
+	/* we want a warning message before it gets rough... */
+	atomic_set(&bp->eq_spq_left,
+		min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
 }
 
-static void bnx2x_init_ind_table(struct bnx2x *bp)
+void bnx2x_push_indir_table(struct bnx2x *bp)
 {
 	int func = BP_FUNC(bp);
 	int i;
@@ -4243,13 +4262,20 @@
 	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
 		return;
 
-	DP(NETIF_MSG_IFUP,
-	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
 	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
 		REG_WR8(bp, BAR_TSTRORM_INTMEM +
 			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
-			bp->fp->cl_id + (i % (bp->num_queues -
-				NONE_ETH_CONTEXT_USE)));
+			bp->fp->cl_id + bp->rx_indir_table[i]);
+}
+
+static void bnx2x_init_ind_table(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
+		bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
+
+	bnx2x_push_indir_table(bp);
 }
 
 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
@@ -4281,9 +4307,12 @@
 		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
 				BNX2X_ACCEPT_MULTICAST;
 #ifdef BCM_CNIC
-		cl_id = bnx2x_fcoe(bp, cl_id);
-		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-					  BNX2X_ACCEPT_MULTICAST);
+		if (!NO_FCOE(bp)) {
+			cl_id = bnx2x_fcoe(bp, cl_id);
+			bnx2x_rxq_set_mac_filters(bp, cl_id,
+						  BNX2X_ACCEPT_UNICAST |
+						  BNX2X_ACCEPT_MULTICAST);
+		}
 #endif
 		break;
 
@@ -4291,18 +4320,29 @@
 		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
 				BNX2X_ACCEPT_ALL_MULTICAST;
 #ifdef BCM_CNIC
-		cl_id = bnx2x_fcoe(bp, cl_id);
-		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-					  BNX2X_ACCEPT_MULTICAST);
+		/*
+		 *  Prevent duplication of multicast packets by configuring FCoE
+		 *  L2 Client to receive only matched unicast frames.
+		 */
+		if (!NO_FCOE(bp)) {
+			cl_id = bnx2x_fcoe(bp, cl_id);
+			bnx2x_rxq_set_mac_filters(bp, cl_id,
+						  BNX2X_ACCEPT_UNICAST);
+		}
 #endif
 		break;
 
 	case BNX2X_RX_MODE_PROMISC:
 		def_q_filters |= BNX2X_PROMISCUOUS_MODE;
 #ifdef BCM_CNIC
-		cl_id = bnx2x_fcoe(bp, cl_id);
-		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
-					  BNX2X_ACCEPT_MULTICAST);
+		/*
+		 *  Prevent packet duplication by configuring DROP_ALL for FCoE
+		 *  L2 Client.
+		 */
+		if (!NO_FCOE(bp)) {
+			cl_id = bnx2x_fcoe(bp, cl_id);
+			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
+		}
 #endif
 		/* pass management unicast packets as well */
 		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
@@ -5296,10 +5336,6 @@
 		}
 	}
 
-	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
-						       bp->common.shmem_base,
-						       bp->common.shmem2_base);
-
 	bnx2x_setup_fan_failure_detection(bp);
 
 	/* clear PXP2 attentions */
@@ -5503,9 +5539,6 @@
 
 	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
 	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
-	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
-						       bp->common.shmem_base,
-						       bp->common.shmem2_base);
 	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
 				      bp->common.shmem2_base, port)) {
 		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
@@ -5838,7 +5871,7 @@
 	   BP_ABS_FUNC(bp), load_code);
 
 	bp->dmae_ready = 0;
-	mutex_init(&bp->dmae_mutex);
+	spin_lock_init(&bp->dmae_lock);
 	rc = bnx2x_gunzip_init(bp);
 	if (rc)
 		return rc;
@@ -5990,6 +6023,8 @@
 	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
 		       BCM_PAGE_SIZE * NUM_EQ_PAGES);
 
+	BNX2X_FREE(bp->rx_indir_table);
+
 #undef BNX2X_PCI_FREE
 #undef BNX2X_KFREE
 }
@@ -6120,6 +6155,9 @@
 	/* EQ */
 	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
 			BCM_PAGE_SIZE * NUM_EQ_PAGES);
+
+	BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
+		    TSTORM_INDIRECTION_TABLE_SIZE);
 	return 0;
 
 alloc_mem_err:
@@ -6173,12 +6211,14 @@
 	int ramrod_flags = WAIT_RAMROD_COMMON;
 
 	bp->set_mac_pending = 1;
-	smp_wmb();
 
 	config->hdr.length = 1;
 	config->hdr.offset = cam_offset;
 	config->hdr.client_id = 0xff;
-	config->hdr.reserved1 = 0;
+	/* Mark the single MAC configuration ramrod (as opposed to a
+	 * UC/MC list configuration).
+	 */
+	config->hdr.echo = 1;
 
 	/* primary MAC */
 	config->config_table[0].msb_mac_addr =
@@ -6210,6 +6250,8 @@
 	   config->config_table[0].middle_mac_addr,
 	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
 
+	mb();
+
 	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
 		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
 		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
@@ -6274,20 +6316,15 @@
 	if (CHIP_IS_E1H(bp))
 		return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
 	else if (CHIP_MODE_IS_4_PORT(bp))
-		return BP_FUNC(bp) * 32  + rel_offset;
+		return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
 	else
-		return BP_VN(bp) * 32  + rel_offset;
+		return E2_FUNC_MAX * rel_offset + BP_VN(bp);
 }
 
 /**
  *  LLH CAM line allocations: currently only iSCSI and ETH macs are
  *  relevant. In addition, current implementation is tuned for a
  *  single ETH MAC.
- *
- *  When multiple unicast ETH MACs PF configuration in switch
- *  independent mode is required (NetQ, multiple netdev MACs,
- *  etc.), consider better utilisation of 16 per function MAC
- *  entries in the LLH memory.
  */
 enum {
 	LLH_CAM_ISCSI_ETH_LINE = 0,
@@ -6362,14 +6399,37 @@
 		bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
 	}
 }
-static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
+
+static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
+{
+	return CHIP_REV_IS_SLOW(bp) ?
+		(BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
+		(BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
+}
+
+/* set mc list, do not wait as wait implies sleep and
+ * set_rx_mode can be invoked from non-sleepable context.
+ *
+ * Instead we use the same ramrod data buffer each time we need
+ * to configure a list of addresses, and use the fact that the
+ * list of MACs is changed in an incremental way and that the
+ * function is called under the netif_addr_lock. A temporary
+ * inconsistent CAM configuration (possible in case of a very fast
+ * sequence of add/del/add on the host side) will shortly be
+ * restored by the handler of the last ramrod.
+ */
+static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
 {
 	int i = 0, old;
 	struct net_device *dev = bp->dev;
+	u8 offset = bnx2x_e1_cam_mc_offset(bp);
 	struct netdev_hw_addr *ha;
 	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
 	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
 
+	if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
+		return -EINVAL;
+
 	netdev_for_each_mc_addr(ha, dev) {
 		/* copy mac */
 		config_cmd->config_table[i].msb_mac_addr =
@@ -6410,32 +6470,47 @@
 		}
 	}
 
+	wmb();
+
 	config_cmd->hdr.length = i;
 	config_cmd->hdr.offset = offset;
 	config_cmd->hdr.client_id = 0xff;
-	config_cmd->hdr.reserved1 = 0;
+	/* Mark that this ramrod doesn't use bp->set_mac_pending for
+	 * synchronization.
+	 */
+	config_cmd->hdr.echo = 0;
 
-	bp->set_mac_pending = 1;
-	smp_wmb();
+	mb();
 
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
 		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
 }
-static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
+
+void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
 {
 	int i;
 	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
 	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
 	int ramrod_flags = WAIT_RAMROD_COMMON;
+	u8 offset = bnx2x_e1_cam_mc_offset(bp);
 
-	bp->set_mac_pending = 1;
-	smp_wmb();
-
-	for (i = 0; i < config_cmd->hdr.length; i++)
+	for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
 		SET_FLAG(config_cmd->config_table[i].flags,
 			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
 			T_ETH_MAC_COMMAND_INVALIDATE);
 
+	wmb();
+
+	config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
+	config_cmd->hdr.offset = offset;
+	config_cmd->hdr.client_id = 0xff;
+	/* We'll wait for a completion this time... */
+	config_cmd->hdr.echo = 1;
+
+	bp->set_mac_pending = 1;
+
+	mb();
+
 	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
 		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
 
@@ -6445,6 +6520,44 @@
 
 }
 
+/* Accept one or more multicasts */
+static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
+{
+	struct net_device *dev = bp->dev;
+	struct netdev_hw_addr *ha;
+	u32 mc_filter[MC_HASH_SIZE];
+	u32 crc, bit, regidx;
+	int i;
+
+	memset(mc_filter, 0, 4 * MC_HASH_SIZE);
+
+	netdev_for_each_mc_addr(ha, dev) {
+		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+		   bnx2x_mc_addr(ha));
+
+		crc = crc32c_le(0, bnx2x_mc_addr(ha),
+				ETH_ALEN);
+		bit = (crc >> 24) & 0xff;
+		regidx = bit >> 5;
+		bit &= 0x1f;
+		mc_filter[regidx] |= (1 << bit);
+	}
+
+	for (i = 0; i < MC_HASH_SIZE; i++)
+		REG_WR(bp, MC_HASH_OFFSET(bp, i),
+		       mc_filter[i]);
+
+	return 0;
+}
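/*
 * Illustrative sketch (not part of the patch): bnx2x_set_e1h_mc_list() above
 * hashes each multicast MAC with CRC32C, keeps the top 8 bits of the CRC, and
 * uses bits [7:5] to pick one of the MC_HASH_SIZE 32-bit registers (eight, as
 * the 8-bit hash implies) and bits [4:0] as the bit position inside it. The
 * standalone rendering below starts from an already computed CRC (in the
 * driver it comes from crc32c_le()):
 */
#include <assert.h>
#include <stdint.h>

#define MC_HASH_SIZE	8

static void mc_hash_add(uint32_t mc_filter[MC_HASH_SIZE], uint32_t crc)
{
	uint32_t bit = (crc >> 24) & 0xff;	/* top 8 bits of the CRC */
	uint32_t regidx = bit >> 5;		/* which of the 8 registers */

	bit &= 0x1f;				/* which bit inside it */
	mc_filter[regidx] |= 1u << bit;
}

int main(void)
{
	uint32_t mc_filter[MC_HASH_SIZE] = { 0 };

	/* a CRC of 0xA3xxxxxx selects register 5 (0xA3 >> 5), bit 3 (0xA3 & 0x1F) */
	mc_hash_add(mc_filter, 0xa3000000);
	assert(mc_filter[5] == (1u << 3));
	return 0;
}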
+
+void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < MC_HASH_SIZE; i++)
+		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+}
+
 #ifdef BCM_CNIC
 /**
  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
@@ -6463,12 +6576,13 @@
 	u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
 		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
 	u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
+	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
 
 	/* Send a SET_MAC ramrod */
-	bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
+	bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
 			       cam_offset, 0);
 
-	bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
+	bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
 
 	return 0;
 }
@@ -7110,20 +7224,15 @@
 	/* Give HW time to discard old tx messages */
 	msleep(1);
 
-	if (CHIP_IS_E1(bp)) {
-		/* invalidate mc list,
-		 * wait and poll (interrupts are off)
-		 */
-		bnx2x_invlidate_e1_mc_list(bp);
-		bnx2x_set_eth_mac(bp, 0);
+	bnx2x_set_eth_mac(bp, 0);
 
-	} else {
+	bnx2x_invalidate_uc_list(bp);
+
+	if (CHIP_IS_E1(bp))
+		bnx2x_invalidate_e1_mc_list(bp);
+	else {
+		bnx2x_invalidate_e1h_mc_list(bp);
 		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
-
-		bnx2x_set_eth_mac(bp, 0);
-
-		for (i = 0; i < MC_HASH_SIZE; i++)
-			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
 	}
 
 #ifdef BCM_CNIC
@@ -8379,13 +8488,60 @@
 		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
 		bp->mdio.prtad =
 			XGXS_EXT_PHY_ADDR(ext_phy_config);
+
+	/*
+	 * Check if a HW lock is required to access the MDC/MDIO bus to the
+	 * PHY(s). In MF mode, it is set to cover self-test cases.
+	 */
+	if (IS_MF(bp))
+		bp->port.need_hw_lock = 1;
+	else
+		bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+							bp->common.shmem_base,
+							bp->common.shmem2_base);
 }
 
+#ifdef BCM_CNIC
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+	u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+				drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
+	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+				drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
+
+	/* Get the number of maximum allowed iSCSI and FCoE connections */
+	bp->cnic_eth_dev.max_iscsi_conn =
+		(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
+		BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+
+	bp->cnic_eth_dev.max_fcoe_conn =
+		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
+		BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+
+	BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
+		       bp->cnic_eth_dev.max_iscsi_conn,
+		       bp->cnic_eth_dev.max_fcoe_conn);
+
+	/* If the maximum allowed number of connections is zero,
+	 * disable the feature.
+	 */
+	if (!bp->cnic_eth_dev.max_iscsi_conn)
+		bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+	if (!bp->cnic_eth_dev.max_fcoe_conn)
+		bp->flags |= NO_FCOE_FLAG;
+}
+#endif
+
 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
 {
 	u32 val, val2;
 	int func = BP_ABS_FUNC(bp);
 	int port = BP_PORT(bp);
+#ifdef BCM_CNIC
+	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
+	u8 *fip_mac = bp->fip_mac;
+#endif
 
 	if (BP_NOMCP(bp)) {
 		BNX2X_ERROR("warning: random MAC workaround active\n");
@@ -8398,7 +8554,9 @@
 			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
 
 #ifdef BCM_CNIC
-		/* iSCSI NPAR MAC */
+		/* iSCSI and FCoE NPAR MACs: if there is neither an iSCSI nor
+		 * a FCoE MAC then the appropriate feature should be disabled.
+		 */
 		if (IS_MF_SI(bp)) {
 			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
 			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
@@ -8406,8 +8564,39 @@
 						     iscsi_mac_addr_upper);
 				val = MF_CFG_RD(bp, func_ext_config[func].
 						    iscsi_mac_addr_lower);
-				bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
-			}
+				BNX2X_DEV_INFO("Read iSCSI MAC: "
+					       "0x%x:0x%04x\n", val2, val);
+				bnx2x_set_mac_buf(iscsi_mac, val, val2);
+
+				/* Disable iSCSI OOO if MAC configuration is
+				 * invalid.
+				 */
+				if (!is_valid_ether_addr(iscsi_mac)) {
+					bp->flags |= NO_ISCSI_OOO_FLAG |
+						     NO_ISCSI_FLAG;
+					memset(iscsi_mac, 0, ETH_ALEN);
+				}
+			} else
+				bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+			if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+				val2 = MF_CFG_RD(bp, func_ext_config[func].
+						     fcoe_mac_addr_upper);
+				val = MF_CFG_RD(bp, func_ext_config[func].
+						    fcoe_mac_addr_lower);
+				BNX2X_DEV_INFO("Read FCoE MAC to "
+					       "0x%x:0x%04x\n", val2, val);
+				bnx2x_set_mac_buf(fip_mac, val, val2);
+
+				/* Disable FCoE if MAC configuration is
+				 * invalid.
+				 */
+				if (!is_valid_ether_addr(fip_mac)) {
+					bp->flags |= NO_FCOE_FLAG;
+					memset(bp->fip_mac, 0, ETH_ALEN);
+				}
+			} else
+				bp->flags |= NO_FCOE_FLAG;
 		}
 #endif
 	} else {
@@ -8421,7 +8610,7 @@
 				    iscsi_mac_upper);
 		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
 				   iscsi_mac_lower);
-		bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
+		bnx2x_set_mac_buf(iscsi_mac, val, val2);
 #endif
 	}
 
@@ -8429,14 +8618,12 @@
 	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
 
 #ifdef BCM_CNIC
-	/* Inform the upper layers about FCoE MAC */
+	/* Set the FCoE MAC in modes other than MF_SI */
 	if (!CHIP_IS_E1x(bp)) {
 		if (IS_MF_SD(bp))
-			memcpy(bp->fip_mac, bp->dev->dev_addr,
-			       sizeof(bp->fip_mac));
-		else
-			memcpy(bp->fip_mac, bp->iscsi_mac,
-			       sizeof(bp->fip_mac));
+			memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+		else if (!IS_MF(bp))
+			memcpy(fip_mac, iscsi_mac, ETH_ALEN);
 	}
 #endif
 }
@@ -8599,6 +8786,10 @@
 	/* Get MAC addresses */
 	bnx2x_get_mac_hwinfo(bp);
 
+#ifdef BCM_CNIC
+	bnx2x_get_cnic_info(bp);
+#endif
+
 	return rc;
 }
 
@@ -8813,12 +9004,197 @@
 	return 0;
 }
 
+#define E1_MAX_UC_LIST	29
+#define E1H_MAX_UC_LIST	30
+#define E2_MAX_UC_LIST	14
+static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
+{
+	if (CHIP_IS_E1(bp))
+		return E1_MAX_UC_LIST;
+	else if (CHIP_IS_E1H(bp))
+		return E1H_MAX_UC_LIST;
+	else
+		return E2_MAX_UC_LIST;
+}
+
+
+static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
+{
+	if (CHIP_IS_E1(bp))
+		/* CAM Entries for Port0:
+		 *      0 - prim ETH MAC
+		 *      1 - BCAST MAC
+		 *      2 - iSCSI L2 ring ETH MAC
+		 *      3-31 - UC MACs
+		 *
+		 * Port1 entries are allocated the same way starting from
+		 * entry 32.
+		 */
+		return 3 + 32 * BP_PORT(bp);
+	else if (CHIP_IS_E1H(bp)) {
+		/* CAM Entries:
+		 *      0-7  - prim ETH MAC for each function
+		 *      8-15 - iSCSI L2 ring ETH MAC for each function
+		 *      16 till 255 UC MAC lists for each function
+		 *
+		 * Remark: There is no FCoE support for E1H, thus FCoE related
+		 *         MACs are not considered.
+		 */
+		return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
+			bnx2x_max_uc_list(bp) * BP_FUNC(bp);
+	} else {
+		/* CAM Entries (there is a separate CAM per engine):
+		 *      0-3  - prim ETH MAC for each function
+		 *      4-7 - iSCSI L2 ring ETH MAC for each function
+		 *      8-11 - FIP ucast L2 MAC for each function
+		 *      12-15 - ALL_ENODE_MACS mcast MAC for each function
+		 *      16 till 71 UC MAC lists for each function
+		 */
+		u8 func_idx =
+			(CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
+
+		return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
+			bnx2x_max_uc_list(bp) * func_idx;
+	}
+}
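/*
 * Illustrative sketch (not part of the patch): in the E1H branch above the
 * CAM holds 8 primary ETH entries (0-7), 8 iSCSI entries (8-15) and then one
 * per-function block of E1H_MAX_UC_LIST (30) UC entries starting at entry 16.
 * The rendering below assumes E1H_FUNC_MAX == 8 and CAM_ISCSI_ETH_LINE == 1,
 * which is what the layout described in the comment implies:
 */
#include <assert.h>

#define E1H_FUNC_MAX		8
#define CAM_ISCSI_ETH_LINE	1
#define E1H_MAX_UC_LIST		30

static unsigned int e1h_uc_cam_offset(unsigned int func)
{
	return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
	       E1H_MAX_UC_LIST * func;
}

int main(void)
{
	assert(e1h_uc_cam_offset(0) == 16);	/* first UC block follows entry 15 */
	assert(e1h_uc_cam_offset(2) == 76);	/* function 2: 16 + 2 * 30 */
	return 0;
}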
+
+/* set uc list, do not wait as wait implies sleep and
+ * set_rx_mode can be invoked from non-sleepable context.
+ *
+ * Instead we use the same ramrod data buffer each time we need
+ * to configure a list of addresses, and use the fact that the
+ * list of MACs is changed in an incremental way and that the
+ * function is called under the netif_addr_lock. A temporary
+ * inconsistent CAM configuration (possible in case of very fast
+ * sequence of add/del/add on the host side) will shortly be
+ * restored by the handler of the last ramrod.
+ */
+static int bnx2x_set_uc_list(struct bnx2x *bp)
+{
+	int i = 0, old;
+	struct net_device *dev = bp->dev;
+	u8 offset = bnx2x_uc_list_cam_offset(bp);
+	struct netdev_hw_addr *ha;
+	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
+	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+
+	if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
+		return -EINVAL;
+
+	netdev_for_each_uc_addr(ha, dev) {
+		/* copy mac */
+		config_cmd->config_table[i].msb_mac_addr =
+			swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
+		config_cmd->config_table[i].middle_mac_addr =
+			swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
+		config_cmd->config_table[i].lsb_mac_addr =
+			swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
+
+		config_cmd->config_table[i].vlan_id = 0;
+		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
+		config_cmd->config_table[i].clients_bit_vector =
+			cpu_to_le32(1 << BP_L_ID(bp));
+
+		SET_FLAG(config_cmd->config_table[i].flags,
+			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			T_ETH_MAC_COMMAND_SET);
+
+		DP(NETIF_MSG_IFUP,
+		   "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
+		   config_cmd->config_table[i].msb_mac_addr,
+		   config_cmd->config_table[i].middle_mac_addr,
+		   config_cmd->config_table[i].lsb_mac_addr);
+
+		i++;
+
+		/* Set uc MAC in NIG */
+		bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
+				     LLH_CAM_ETH_LINE + i);
+	}
+	old = config_cmd->hdr.length;
+	if (old > i) {
+		for (; i < old; i++) {
+			if (CAM_IS_INVALID(config_cmd->
+					   config_table[i])) {
+				/* already invalidated */
+				break;
+			}
+			/* invalidate */
+			SET_FLAG(config_cmd->config_table[i].flags,
+				MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+				T_ETH_MAC_COMMAND_INVALIDATE);
+		}
+	}
+
+	wmb();
+
+	config_cmd->hdr.length = i;
+	config_cmd->hdr.offset = offset;
+	config_cmd->hdr.client_id = 0xff;
+	/* Mark that this ramrod doesn't use bp->set_mac_pending for
+	 * synchronization.
+	 */
+	config_cmd->hdr.echo = 0;
+
+	mb();
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+
+}
+
+void bnx2x_invalidate_uc_list(struct bnx2x *bp)
+{
+	int i;
+	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
+	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
+	int ramrod_flags = WAIT_RAMROD_COMMON;
+	u8 offset = bnx2x_uc_list_cam_offset(bp);
+	u8 max_list_size = bnx2x_max_uc_list(bp);
+
+	for (i = 0; i < max_list_size; i++) {
+		SET_FLAG(config_cmd->config_table[i].flags,
+			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			T_ETH_MAC_COMMAND_INVALIDATE);
+		bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
+	}
+
+	wmb();
+
+	config_cmd->hdr.length = max_list_size;
+	config_cmd->hdr.offset = offset;
+	config_cmd->hdr.client_id = 0xff;
+	/* We'll wait for a completion this time... */
+	config_cmd->hdr.echo = 1;
+
+	bp->set_mac_pending = 1;
+
+	mb();
+
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
+		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
+
+	/* Wait for a completion */
+	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
+				ramrod_flags);
+
+}
+
+static inline int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+	/* some multicasts */
+	if (CHIP_IS_E1(bp)) {
+		return bnx2x_set_e1_mc_list(bp);
+	} else { /* E1H and newer */
+		return bnx2x_set_e1h_mc_list(bp);
+	}
+}
+
 /* called with netif_tx_lock from dev_mcast.c */
 void bnx2x_set_rx_mode(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
-	int port = BP_PORT(bp);
 
 	if (bp->state != BNX2X_STATE_OPEN) {
 		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
@@ -8829,47 +9205,16 @@
 
 	if (dev->flags & IFF_PROMISC)
 		rx_mode = BNX2X_RX_MODE_PROMISC;
-	else if ((dev->flags & IFF_ALLMULTI) ||
-		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
-		  CHIP_IS_E1(bp)))
+	else if (dev->flags & IFF_ALLMULTI)
 		rx_mode = BNX2X_RX_MODE_ALLMULTI;
-	else { /* some multicasts */
-		if (CHIP_IS_E1(bp)) {
-			/*
-			 * set mc list, do not wait as wait implies sleep
-			 * and set_rx_mode can be invoked from non-sleepable
-			 * context
-			 */
-			u8 offset = (CHIP_REV_IS_SLOW(bp) ?
-				     BNX2X_MAX_EMUL_MULTI*(1 + port) :
-				     BNX2X_MAX_MULTICAST*(1 + port));
+	else {
+		/* some multicasts */
+		if (bnx2x_set_mc_list(bp))
+			rx_mode = BNX2X_RX_MODE_ALLMULTI;
 
-			bnx2x_set_e1_mc_list(bp, offset);
-		} else { /* E1H */
-			/* Accept one or more multicasts */
-			struct netdev_hw_addr *ha;
-			u32 mc_filter[MC_HASH_SIZE];
-			u32 crc, bit, regidx;
-			int i;
-
-			memset(mc_filter, 0, 4 * MC_HASH_SIZE);
-
-			netdev_for_each_mc_addr(ha, dev) {
-				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
-				   bnx2x_mc_addr(ha));
-
-				crc = crc32c_le(0, bnx2x_mc_addr(ha),
-						ETH_ALEN);
-				bit = (crc >> 24) & 0xff;
-				regidx = bit >> 5;
-				bit &= 0x1f;
-				mc_filter[regidx] |= (1 << bit);
-			}
-
-			for (i = 0; i < MC_HASH_SIZE; i++)
-				REG_WR(bp, MC_HASH_OFFSET(bp, i),
-				       mc_filter[i]);
-		}
+		/* some unicasts */
+		if (bnx2x_set_uc_list(bp))
+			rx_mode = BNX2X_RX_MODE_PROMISC;
 	}
 
 	bp->rx_mode = rx_mode;
@@ -8950,7 +9295,7 @@
 	.ndo_stop		= bnx2x_close,
 	.ndo_start_xmit		= bnx2x_start_xmit,
 	.ndo_select_queue	= bnx2x_select_queue,
-	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
+	.ndo_set_rx_mode	= bnx2x_set_rx_mode,
 	.ndo_set_mac_address	= bnx2x_change_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_do_ioctl		= bnx2x_ioctl,
@@ -9776,15 +10121,21 @@
 					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
 		}
 
-		/* There may be not more than 8 L2 and COMMON SPEs and not more
-		 * than 8 L5 SPEs in the air.
+		/* There may be no more than 8 L2 and no more than 8 L5 SPEs
+		 * in the air. We also check that the number of outstanding
+		 * COMMON ramrods is not more than the EQ and SPQ can
+		 * accommodate.
 		 */
-		if ((type == NONE_CONNECTION_TYPE) ||
-		    (type == ETH_CONNECTION_TYPE)) {
-			if (!atomic_read(&bp->spq_left))
+		if (type == ETH_CONNECTION_TYPE) {
+			if (!atomic_read(&bp->cq_spq_left))
 				break;
 			else
-				atomic_dec(&bp->spq_left);
+				atomic_dec(&bp->cq_spq_left);
+		} else if (type == NONE_CONNECTION_TYPE) {
+			if (!atomic_read(&bp->eq_spq_left))
+				break;
+			else
+				atomic_dec(&bp->eq_spq_left);
 		} else if ((type == ISCSI_CONNECTION_TYPE) ||
 			   (type == FCOE_CONNECTION_TYPE)) {
 			if (bp->cnic_spq_pending >=
@@ -9862,7 +10213,8 @@
 	int rc = 0;
 
 	mutex_lock(&bp->cnic_mutex);
-	c_ops = bp->cnic_ops;
+	c_ops = rcu_dereference_protected(bp->cnic_ops,
+					  lockdep_is_held(&bp->cnic_mutex));
 	if (c_ops)
 		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
 	mutex_unlock(&bp->cnic_mutex);
@@ -9976,7 +10328,7 @@
 		int count = ctl->data.credit.credit_count;
 
 		smp_mb__before_atomic_inc();
-		atomic_add(count, &bp->spq_left);
+		atomic_add(count, &bp->cq_spq_left);
 		smp_mb__after_atomic_inc();
 		break;
 	}
@@ -10072,6 +10424,13 @@
 	struct bnx2x *bp = netdev_priv(dev);
 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 
+	/* If both iSCSI and FCoE are disabled - return NULL in
+	 * order to indicate CNIC that it should not try to work
+	 * with this device.
+	 */
+	if (NO_ISCSI(bp) && NO_FCOE(bp))
+		return NULL;
+
 	cp->drv_owner = THIS_MODULE;
 	cp->chip_id = CHIP_ID(bp);
 	cp->pdev = bp->pdev;
@@ -10092,6 +10451,15 @@
 		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
 	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
 
+	if (NO_ISCSI_OOO(bp))
+		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+
+	if (NO_ISCSI(bp))
+		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
+
+	if (NO_FCOE(bp))
+		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
+
 	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
 			 "starting cid %d\n",
 	   cp->ctx_blk_size,
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index c939683..1c89f19 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -6083,6 +6083,7 @@
 #define MDIO_PMA_REG_8727_PCS_OPT_CTRL		0xc808
 #define MDIO_PMA_REG_8727_GPIO_CTRL		0xc80e
 #define MDIO_PMA_REG_8727_PCS_GP		0xc842
+#define MDIO_PMA_REG_8727_OPT_CFG_REG		0xc8e4
 
 #define MDIO_AN_REG_8727_MISC_CTRL		0x8309
 
@@ -6194,7 +6195,11 @@
 #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER	0x0000
 #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER		0x0100
 #define MDIO_CTL_REG_84823_MEDIA_FIBER_1G			0x1000
+#define MDIO_CTL_REG_84823_USER_CTRL_REG		0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS		0x0080
 
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1		0xa8e3
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN		0x0080
 
 #define IGU_FUNC_BASE			0x0400
 
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 171782e..1024ae1 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2470,6 +2470,10 @@
 	if (!(dev->flags & IFF_MASTER))
 		goto out;
 
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		goto out;
+
 	if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
 		goto out;
 
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index f4e638c..5c6fba8 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -326,6 +326,10 @@
 		goto out;
 	}
 
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		goto out;
+
 	if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
 		goto out;
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b1025b8..77e3c6a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1372,8 +1372,8 @@
 {
 	struct slave *slave;
 	struct net_device *bond_dev = bond->dev;
-	unsigned long features = bond_dev->features;
-	unsigned long vlan_features = 0;
+	u32 features = bond_dev->features;
+	u32 vlan_features = 0;
 	unsigned short max_hard_header_len = max((u16)ETH_HLEN,
 						bond_dev->hard_header_len);
 	int i;
@@ -1400,8 +1400,8 @@
 
 done:
 	features |= (bond_dev->features & BOND_VLAN_FEATURES);
-	bond_dev->features = netdev_fix_features(features, NULL);
-	bond_dev->vlan_features = netdev_fix_features(vlan_features, NULL);
+	bond_dev->features = netdev_fix_features(bond_dev, features);
+	bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
 	bond_dev->hard_header_len = max_hard_header_len;
 
 	return 0;
@@ -1594,9 +1594,9 @@
 		}
 	}
 
-	res = netdev_set_master(slave_dev, bond_dev);
+	res = netdev_set_bond_master(slave_dev, bond_dev);
 	if (res) {
-		pr_debug("Error %d calling netdev_set_master\n", res);
+		pr_debug("Error %d calling netdev_set_bond_master\n", res);
 		goto err_restore_mac;
 	}
 	/* open the slave since the application closed it */
@@ -1812,7 +1812,7 @@
 	dev_close(slave_dev);
 
 err_unset_master:
-	netdev_set_master(slave_dev, NULL);
+	netdev_set_bond_master(slave_dev, NULL);
 
 err_restore_mac:
 	if (!bond->params.fail_over_mac) {
@@ -1992,7 +1992,7 @@
 		netif_addr_unlock_bh(bond_dev);
 	}
 
-	netdev_set_master(slave_dev, NULL);
+	netdev_set_bond_master(slave_dev, NULL);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	read_lock_bh(&bond->lock);
@@ -2114,7 +2114,7 @@
 			netif_addr_unlock_bh(bond_dev);
 		}
 
-		netdev_set_master(slave_dev, NULL);
+		netdev_set_bond_master(slave_dev, NULL);
 
 		/* close slave before restoring its mac address */
 		dev_close(slave_dev);
@@ -2733,6 +2733,10 @@
 	if (!slave || !slave_do_arp_validate(bond, slave))
 		goto out_unlock;
 
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		goto out_unlock;
+
 	if (!pskb_may_pull(skb, arp_hdr_len(dev)))
 		goto out_unlock;
 
@@ -4653,6 +4657,8 @@
 	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
 	.ndo_poll_controller	= bond_poll_controller,
 #endif
+	.ndo_add_slave		= bond_enslave,
+	.ndo_del_slave		= bond_release,
 };
 
 static void bond_destructor(struct net_device *bond_dev)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 8fd0174..72bb0f6 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1198,7 +1198,7 @@
 			bond->dev->name, new_value);
 	}
 out:
-	return count;
+	return ret;
 }
 static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR,
 		   bonding_show_carrier, bonding_store_carrier);
@@ -1595,7 +1595,7 @@
 		}
 	}
 out:
-	return count;
+	return ret;
 }
 static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
 		   bonding_show_slaves_active, bonding_store_slaves_active);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d5a9db6..1d699e3 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -23,7 +23,7 @@
 
 	  As only the sending and receiving of CAN frames is implemented, this
 	  driver should work with the (serial/USB) CAN hardware from:
-	  www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de
+	  www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
 
 	  Userspace tools to attach the SLCAN line discipline (slcan_attach,
 	  slcand) can be found in the can-utils at the SocketCAN SVN, see
@@ -115,8 +115,12 @@
 
 source "drivers/net/can/sja1000/Kconfig"
 
+source "drivers/net/can/c_can/Kconfig"
+
 source "drivers/net/can/usb/Kconfig"
 
+source "drivers/net/can/softing/Kconfig"
+
 config CAN_DEBUG_DEVICES
 	bool "CAN devices debugging messages"
 	depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 07ca159..24ebfe8 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -9,9 +9,11 @@
 can-dev-y			:= dev.o
 
 obj-y				+= usb/
+obj-y				+= softing/
 
 obj-$(CONFIG_CAN_SJA1000)	+= sja1000/
 obj-$(CONFIG_CAN_MSCAN)		+= mscan/
+obj-$(CONFIG_CAN_C_CAN)		+= c_can/
 obj-$(CONFIG_CAN_AT91)		+= at91_can.o
 obj-$(CONFIG_CAN_TI_HECC)	+= ti_hecc.o
 obj-$(CONFIG_CAN_MCP251X)	+= mcp251x.o
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 7ef83d0..57d2ffb 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -2,7 +2,7 @@
  * at91_can.c - CAN network driver for AT91 SoC CAN controller
  *
  * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
- * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
+ * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de>
  *
  * This software may be distributed under the terms of the GNU General
  * Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
@@ -40,22 +41,23 @@
 
 #include <mach/board.h>
 
-#define AT91_NAPI_WEIGHT	12
+#define AT91_NAPI_WEIGHT	11
 
 /*
  * RX/TX Mailbox split
  * don't dare to touch
  */
-#define AT91_MB_RX_NUM		12
+#define AT91_MB_RX_NUM		11
 #define AT91_MB_TX_SHIFT	2
 
-#define AT91_MB_RX_FIRST	0
+#define AT91_MB_RX_FIRST	1
 #define AT91_MB_RX_LAST		(AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
 
 #define AT91_MB_RX_MASK(i)	((1 << (i)) - 1)
 #define AT91_MB_RX_SPLIT	8
 #define AT91_MB_RX_LOW_LAST	(AT91_MB_RX_SPLIT - 1)
-#define AT91_MB_RX_LOW_MASK	(AT91_MB_RX_MASK(AT91_MB_RX_SPLIT))
+#define AT91_MB_RX_LOW_MASK	(AT91_MB_RX_MASK(AT91_MB_RX_SPLIT) & \
+				 ~AT91_MB_RX_MASK(AT91_MB_RX_FIRST))
 
 #define AT91_MB_TX_NUM		(1 << AT91_MB_TX_SHIFT)
 #define AT91_MB_TX_FIRST	(AT91_MB_RX_LAST + 1)
@@ -168,6 +170,8 @@
 
 	struct clk		*clk;
 	struct at91_can_data	*pdata;
+
+	canid_t			mb0_id;
 };
 
 static struct can_bittiming_const at91_bittiming_const = {
@@ -220,6 +224,18 @@
 	set_mb_mode_prio(priv, mb, mode, 0);
 }
 
+static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
+{
+	u32 reg_mid;
+
+	if (can_id & CAN_EFF_FLAG)
+		reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
+	else
+		reg_mid = (can_id & CAN_SFF_MASK) << 18;
+
+	return reg_mid;
+}
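+
+/*
+ * Editor's worked example (not part of the original patch), using the
+ * uapi constants CAN_SFF_MASK = 0x7ff and CAN_EFF_MASK = 0x1fffffff:
+ *
+ *   standard id 0x7ff         -> reg_mid = 0x7ff << 18 = 0x1ffc0000
+ *   extended id 0x1fffffff
+ *   (CAN_EFF_FLAG set)        -> reg_mid = 0x1fffffff | AT91_MID_MIDE
+ */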
+
 /*
 * Switch transceiver on or off
  */
@@ -233,12 +249,22 @@
 {
 	struct at91_priv *priv = netdev_priv(dev);
 	unsigned int i;
+	u32 reg_mid;
 
 	/*
-	 * The first 12 mailboxes are used as a reception FIFO. The
-	 * last mailbox is configured with overwrite option. The
-	 * overwrite flag indicates a FIFO overflow.
+	 * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
+	 * mailbox is disabled. The next 11 mailboxes are used as a
+	 * reception FIFO. The last mailbox is configured with
+	 * overwrite option. The overwrite flag indicates a FIFO
+	 * overflow.
 	 */
+	reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
+	for (i = 0; i < AT91_MB_RX_FIRST; i++) {
+		set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
+		at91_write(priv, AT91_MID(i), reg_mid);
+		at91_write(priv, AT91_MCR(i), 0x0);	/* clear dlc */
+	}
+
 	for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
 		set_mb_mode(priv, i, AT91_MB_MODE_RX);
 	set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
@@ -254,7 +280,8 @@
 		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
 
 	/* Reset tx and rx helper pointers */
-	priv->tx_next = priv->tx_echo = priv->rx_next = 0;
+	priv->tx_next = priv->tx_echo = 0;
+	priv->rx_next = AT91_MB_RX_FIRST;
 }
 
 static int at91_set_bittiming(struct net_device *dev)
@@ -372,12 +399,7 @@
 		netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
 		return NETDEV_TX_BUSY;
 	}
-
-	if (cf->can_id & CAN_EFF_FLAG)
-		reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
-	else
-		reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;
-
+	reg_mid = at91_can_id_to_reg_mid(cf->can_id);
 	reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
 		(cf->can_dlc << 16) | AT91_MCR_MTCR;
 
@@ -539,27 +561,31 @@
  *
  * Theory of Operation:
  *
- * 12 of the 16 mailboxes on the chip are reserved for RX. we split
- * them into 2 groups. The lower group holds 8 and upper 4 mailboxes.
+ * 11 of the 16 mailboxes on the chip are reserved for RX. we split
+ * them into 2 groups. The lower group holds 7 and upper 4 mailboxes.
  *
  * Like it or not, but the chip always saves a received CAN message
  * into the first free mailbox it finds (starting with the
  * lowest). This makes it very difficult to read the messages in the
  * right order from the chip. This is how we work around that problem:
  *
- * The first message goes into mb nr. 0 and issues an interrupt. All
+ * The first message goes into mb nr. 1 and issues an interrupt. All
  * rx ints are disabled in the interrupt handler and a napi poll is
  * scheduled. We read the mailbox, but do _not_ reenable the mb (to
  * receive another message).
  *
  *    lower mbxs      upper
- *   ______^______    __^__
- *  /             \  /     \
+ *     ____^______    __^__
+ *    /           \  /     \
  * +-+-+-+-+-+-+-+-++-+-+-+-+
- * |x|x|x|x|x|x|x|x|| | | | |
+ * | |x|x|x|x|x|x|x|| | | | |
  * +-+-+-+-+-+-+-+-++-+-+-+-+
  *  0 0 0 0 0 0  0 0 0 0 1 1  \ mail
  *  0 1 2 3 4 5  6 7 8 9 0 1  / box
+ *  ^
+ *  |
+ *   \
+ *     unused, due to chip bug
  *
  * The variable priv->rx_next points to the next mailbox to read a
  * message from. As long we're in the lower mailboxes we just read the
@@ -590,10 +616,10 @@
 			"order of incoming frames cannot be guaranteed\n");
 
  again:
-	for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
-	     mb < AT91_MB_RX_NUM && quota > 0;
+	for (mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, priv->rx_next);
+	     mb < AT91_MB_RX_LAST + 1 && quota > 0;
 	     reg_sr = at91_read(priv, AT91_SR),
-	     mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) {
+	     mb = find_next_bit(addr, AT91_MB_RX_LAST + 1, ++priv->rx_next)) {
 		at91_read_msg(dev, mb);
 
 		/* reactivate mailboxes */
@@ -610,8 +636,8 @@
 
 	/* upper group completed, look again in lower */
 	if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
-	    quota > 0 && mb >= AT91_MB_RX_NUM) {
-		priv->rx_next = 0;
+	    quota > 0 && mb > AT91_MB_RX_LAST) {
+		priv->rx_next = AT91_MB_RX_FIRST;
 		goto again;
 	}
 
@@ -1037,6 +1063,64 @@
 	.ndo_start_xmit	= at91_start_xmit,
 };
 
+static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct at91_priv *priv = netdev_priv(to_net_dev(dev));
+
+	if (priv->mb0_id & CAN_EFF_FLAG)
+		return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
+	else
+		return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
+}
+
+static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct net_device *ndev = to_net_dev(dev);
+	struct at91_priv *priv = netdev_priv(ndev);
+	unsigned long can_id;
+	ssize_t ret;
+	int err;
+
+	rtnl_lock();
+
+	if (ndev->flags & IFF_UP) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	err = strict_strtoul(buf, 0, &can_id);
+	if (err) {
+		ret = err;
+		goto out;
+	}
+
+	if (can_id & CAN_EFF_FLAG)
+		can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
+	else
+		can_id &= CAN_SFF_MASK;
+
+	priv->mb0_id = can_id;
+	ret = count;
+
+ out:
+	rtnl_unlock();
+	return ret;
+}
+
+static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO,
+	at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);
+
+static struct attribute *at91_sysfs_attrs[] = {
+	&dev_attr_mb0_id.attr,
+	NULL,
+};
+
+static struct attribute_group at91_sysfs_attr_group = {
+	.attrs = at91_sysfs_attrs,
+};
+
 static int __devinit at91_can_probe(struct platform_device *pdev)
 {
 	struct net_device *dev;
@@ -1082,6 +1166,7 @@
 	dev->netdev_ops	= &at91_netdev_ops;
 	dev->irq = irq;
 	dev->flags |= IFF_ECHO;
+	dev->sysfs_groups[0] = &at91_sysfs_attr_group;
 
 	priv = netdev_priv(dev);
 	priv->can.clock.freq = clk_get_rate(clk);
@@ -1093,6 +1178,7 @@
 	priv->dev = dev;
 	priv->clk = clk;
 	priv->pdata = pdev->dev.platform_data;
+	priv->mb0_id = 0x7ff;
 
 	netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
 
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
new file mode 100644
index 0000000..ffb9773
--- /dev/null
+++ b/drivers/net/can/c_can/Kconfig
@@ -0,0 +1,15 @@
+menuconfig CAN_C_CAN
+	tristate "Bosch C_CAN devices"
+	depends on CAN_DEV && HAS_IOMEM
+
+if CAN_C_CAN
+
+config CAN_C_CAN_PLATFORM
+	tristate "Generic Platform Bus based C_CAN driver"
+	---help---
+	  This driver adds support for the C_CAN chips connected to
+	  the "platform bus" (the Linux abstraction for devices attached
+	  directly to the processor), which can be found on various
+	  boards from ST Microelectronics (http://www.st.com), such as
+	  the SPEAr1310 and SPEAr320 evaluation boards.
+endif
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
new file mode 100644
index 0000000..9273f6d
--- /dev/null
+++ b/drivers/net/can/c_can/Makefile
@@ -0,0 +1,8 @@
+#
+#  Makefile for the Bosch C_CAN controller drivers.
+#
+
+obj-$(CONFIG_CAN_C_CAN) += c_can.o
+obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
new file mode 100644
index 0000000..1405078
--- /dev/null
+++ b/drivers/net/can/c_can/c_can.c
@@ -0,0 +1,1158 @@
+/*
+ * CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * TX and RX NAPI implementation has been borrowed from at91 CAN driver
+ * written by:
+ * Copyright
+ * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
+ * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B.
+ * Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include "c_can.h"
+
+/* control register */
+#define CONTROL_TEST		BIT(7)
+#define CONTROL_CCE		BIT(6)
+#define CONTROL_DISABLE_AR	BIT(5)
+#define CONTROL_ENABLE_AR	(0 << 5)
+#define CONTROL_EIE		BIT(3)
+#define CONTROL_SIE		BIT(2)
+#define CONTROL_IE		BIT(1)
+#define CONTROL_INIT		BIT(0)
+
+/* test register */
+#define TEST_RX			BIT(7)
+#define TEST_TX1		BIT(6)
+#define TEST_TX2		BIT(5)
+#define TEST_LBACK		BIT(4)
+#define TEST_SILENT		BIT(3)
+#define TEST_BASIC		BIT(2)
+
+/* status register */
+#define STATUS_BOFF		BIT(7)
+#define STATUS_EWARN		BIT(6)
+#define STATUS_EPASS		BIT(5)
+#define STATUS_RXOK		BIT(4)
+#define STATUS_TXOK		BIT(3)
+
+/* error counter register */
+#define ERR_CNT_TEC_MASK	0xff
+#define ERR_CNT_TEC_SHIFT	0
+#define ERR_CNT_REC_SHIFT	8
+#define ERR_CNT_REC_MASK	(0x7f << ERR_CNT_REC_SHIFT)
+#define ERR_CNT_RP_SHIFT	15
+#define ERR_CNT_RP_MASK		(0x1 << ERR_CNT_RP_SHIFT)
+
+/* bit-timing register */
+#define BTR_BRP_MASK		0x3f
+#define BTR_BRP_SHIFT		0
+#define BTR_SJW_SHIFT		6
+#define BTR_SJW_MASK		(0x3 << BTR_SJW_SHIFT)
+#define BTR_TSEG1_SHIFT		8
+#define BTR_TSEG1_MASK		(0xf << BTR_TSEG1_SHIFT)
+#define BTR_TSEG2_SHIFT		12
+#define BTR_TSEG2_MASK		(0x7 << BTR_TSEG2_SHIFT)
+
+/* brp extension register */
+#define BRP_EXT_BRPE_MASK	0x0f
+#define BRP_EXT_BRPE_SHIFT	0
+
+/* IFx command request */
+#define IF_COMR_BUSY		BIT(15)
+
+/* IFx command mask */
+#define IF_COMM_WR		BIT(7)
+#define IF_COMM_MASK		BIT(6)
+#define IF_COMM_ARB		BIT(5)
+#define IF_COMM_CONTROL		BIT(4)
+#define IF_COMM_CLR_INT_PND	BIT(3)
+#define IF_COMM_TXRQST		BIT(2)
+#define IF_COMM_DATAA		BIT(1)
+#define IF_COMM_DATAB		BIT(0)
+#define IF_COMM_ALL		(IF_COMM_MASK | IF_COMM_ARB | \
+				IF_COMM_CONTROL | IF_COMM_TXRQST | \
+				IF_COMM_DATAA | IF_COMM_DATAB)
+
+/* IFx arbitration */
+#define IF_ARB_MSGVAL		BIT(15)
+#define IF_ARB_MSGXTD		BIT(14)
+#define IF_ARB_TRANSMIT		BIT(13)
+
+/* IFx message control */
+#define IF_MCONT_NEWDAT		BIT(15)
+#define IF_MCONT_MSGLST		BIT(14)
+#define IF_MCONT_CLR_MSGLST	(0 << 14)
+#define IF_MCONT_INTPND		BIT(13)
+#define IF_MCONT_UMASK		BIT(12)
+#define IF_MCONT_TXIE		BIT(11)
+#define IF_MCONT_RXIE		BIT(10)
+#define IF_MCONT_RMTEN		BIT(9)
+#define IF_MCONT_TXRQST		BIT(8)
+#define IF_MCONT_EOB		BIT(7)
+#define IF_MCONT_DLC_MASK	0xf
+
+/*
+ * IFx register masks:
+ * allow easy operation on 16-bit registers when the
+ * argument is 32-bit instead
+ */
+#define IFX_WRITE_LOW_16BIT(x)	((x) & 0xFFFF)
+#define IFX_WRITE_HIGH_16BIT(x)	(((x) & 0xFFFF0000) >> 16)
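+
+/*
+ * Editor's example (not part of the original patch): for a 32-bit
+ * arbitration value of 0x12345678, IFX_WRITE_LOW_16BIT() yields 0x5678
+ * (written to arb1) and IFX_WRITE_HIGH_16BIT() yields 0x1234 (written
+ * to arb2), as done in c_can_write_msg_object() below.
+ */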
+
+/* message object split */
+#define C_CAN_NO_OF_OBJECTS	32
+#define C_CAN_MSG_OBJ_RX_NUM	16
+#define C_CAN_MSG_OBJ_TX_NUM	16
+
+#define C_CAN_MSG_OBJ_RX_FIRST	1
+#define C_CAN_MSG_OBJ_RX_LAST	(C_CAN_MSG_OBJ_RX_FIRST + \
+				C_CAN_MSG_OBJ_RX_NUM - 1)
+
+#define C_CAN_MSG_OBJ_TX_FIRST	(C_CAN_MSG_OBJ_RX_LAST + 1)
+#define C_CAN_MSG_OBJ_TX_LAST	(C_CAN_MSG_OBJ_TX_FIRST + \
+				C_CAN_MSG_OBJ_TX_NUM - 1)
+
+#define C_CAN_MSG_OBJ_RX_SPLIT	9
+#define C_CAN_MSG_RX_LOW_LAST	(C_CAN_MSG_OBJ_RX_SPLIT - 1)
+
+#define C_CAN_NEXT_MSG_OBJ_MASK	(C_CAN_MSG_OBJ_TX_NUM - 1)
+#define RECEIVE_OBJECT_BITS	0x0000ffff
+
+/* status interrupt */
+#define STATUS_INTERRUPT	0x8000
+
+/* global interrupt masks */
+#define ENABLE_ALL_INTERRUPTS	1
+#define DISABLE_ALL_INTERRUPTS	0
+
+/* minimum timeout for checking BUSY status */
+#define MIN_TIMEOUT_VALUE	6
+
+/* napi related */
+#define C_CAN_NAPI_WEIGHT	C_CAN_MSG_OBJ_RX_NUM
+
+/* c_can lec values */
+enum c_can_lec_type {
+	LEC_NO_ERROR = 0,
+	LEC_STUFF_ERROR,
+	LEC_FORM_ERROR,
+	LEC_ACK_ERROR,
+	LEC_BIT1_ERROR,
+	LEC_BIT0_ERROR,
+	LEC_CRC_ERROR,
+	LEC_UNUSED,
+};
+
+/*
+ * c_can error types:
+ * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
+ */
+enum c_can_bus_error_types {
+	C_CAN_NO_ERROR = 0,
+	C_CAN_BUS_OFF,
+	C_CAN_ERROR_WARNING,
+	C_CAN_ERROR_PASSIVE,
+};
+
+static struct can_bittiming_const c_can_bittiming_const = {
+	.name = KBUILD_MODNAME,
+	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
+	.tseg1_max = 16,
+	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 1024,	/* 6-bit BRP field + 4-bit BRPE field*/
+	.brp_inc = 1,
+};
+
+static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
+{
+	return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
+			C_CAN_MSG_OBJ_TX_FIRST;
+}
+
+static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
+{
+	return (priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) +
+			C_CAN_MSG_OBJ_TX_FIRST;
+}
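+
+/*
+ * Editor's note (not part of the original patch): tx_next and tx_echo
+ * are free-running counters; with C_CAN_MSG_OBJ_TX_FIRST = 17 and
+ * C_CAN_NEXT_MSG_OBJ_MASK = 15, a counter value of e.g. 35 maps to
+ * (35 & 15) + 17 = message object 20, so the counters wrap
+ * transparently over the 16 TX message objects.
+ */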
+
+static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg)
+{
+	u32 val = priv->read_reg(priv, reg);
+	val |= ((u32) priv->read_reg(priv, reg + 2)) << 16;
+	return val;
+}
+
+static void c_can_enable_all_interrupts(struct c_can_priv *priv,
+						int enable)
+{
+	unsigned int cntrl_save = priv->read_reg(priv,
+						&priv->regs->control);
+
+	if (enable)
+		cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
+	else
+		cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
+
+	priv->write_reg(priv, &priv->regs->control, cntrl_save);
+}
+
+static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
+{
+	int count = MIN_TIMEOUT_VALUE;
+
+	while (count && priv->read_reg(priv,
+				&priv->regs->ifregs[iface].com_req) &
+				IF_COMR_BUSY) {
+		count--;
+		udelay(1);
+	}
+
+	if (!count)
+		return 1;
+
+	return 0;
+}
+
+static inline void c_can_object_get(struct net_device *dev,
+					int iface, int objno, int mask)
+{
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	/*
+	 * As per the specs, after writing the message object number in the
+	 * IF command request register the transfer between the interface
+	 * register and message RAM must complete within 6 CAN-CLK periods.
+	 */
+	priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+			IFX_WRITE_LOW_16BIT(mask));
+	priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+			IFX_WRITE_LOW_16BIT(objno));
+
+	if (c_can_msg_obj_is_busy(priv, iface))
+		netdev_err(dev, "timed out in object get\n");
+}
+
+static inline void c_can_object_put(struct net_device *dev,
+					int iface, int objno, int mask)
+{
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	/*
+	 * As per the specs, after writing the message object number in the
+	 * IF command request register the transfer between the interface
+	 * register and message RAM must complete within 6 CAN-CLK periods.
+	 */
+	priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+			(IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
+	priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+			IFX_WRITE_LOW_16BIT(objno));
+
+	if (c_can_msg_obj_is_busy(priv, iface))
+		netdev_err(dev, "timed out in object put\n");
+}
+
+static void c_can_write_msg_object(struct net_device *dev,
+			int iface, struct can_frame *frame, int objno)
+{
+	int i;
+	u16 flags = 0;
+	unsigned int id;
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	if (!(frame->can_id & CAN_RTR_FLAG))
+		flags |= IF_ARB_TRANSMIT;
+
+	if (frame->can_id & CAN_EFF_FLAG) {
+		id = frame->can_id & CAN_EFF_MASK;
+		flags |= IF_ARB_MSGXTD;
+	} else
+		id = ((frame->can_id & CAN_SFF_MASK) << 18);
+
+	flags |= IF_ARB_MSGVAL;
+
+	priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+				IFX_WRITE_LOW_16BIT(id));
+	priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags |
+				IFX_WRITE_HIGH_16BIT(id));
+
+	for (i = 0; i < frame->can_dlc; i += 2) {
+		priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2],
+				frame->data[i] | (frame->data[i + 1] << 8));
+	}
+
+	/* enable interrupt for this message object */
+	priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+			IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
+			frame->can_dlc);
+	c_can_object_put(dev, iface, objno, IF_COMM_ALL);
+}
+
+static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
+						int iface, int ctrl_mask,
+						int obj)
+{
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+			ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
+	c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
+
+}
+
+static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
+						int iface,
+						int ctrl_mask)
+{
+	int i;
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
+		priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+				ctrl_mask & ~(IF_MCONT_MSGLST |
+					IF_MCONT_INTPND | IF_MCONT_NEWDAT));
+		c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
+	}
+}
+
+static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
+						int iface, int ctrl_mask,
+						int obj)
+{
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+			ctrl_mask & ~(IF_MCONT_MSGLST |
+				IF_MCONT_INTPND | IF_MCONT_NEWDAT));
+	c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
+}
+
+static void c_can_handle_lost_msg_obj(struct net_device *dev,
+					int iface, int objno)
+{
+	struct c_can_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct sk_buff *skb;
+	struct can_frame *frame;
+
+	netdev_err(dev, "msg lost in buffer %d\n", objno);
+
+	c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
+
+	priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+			IF_MCONT_CLR_MSGLST);
+
+	c_can_object_put(dev, 0, objno, IF_COMM_CONTROL);
+
+	/* create an error msg */
+	skb = alloc_can_err_skb(dev, &frame);
+	if (unlikely(!skb))
+		return;
+
+	frame->can_id |= CAN_ERR_CRTL;
+	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+	stats->rx_errors++;
+	stats->rx_over_errors++;
+
+	netif_receive_skb(skb);
+}
+
+static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
+{
+	u16 flags, data;
+	int i;
+	unsigned int val;
+	struct c_can_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct sk_buff *skb;
+	struct can_frame *frame;
+
+	skb = alloc_can_skb(dev, &frame);
+	if (!skb) {
+		stats->rx_dropped++;
+		return -ENOMEM;
+	}
+
+	frame->can_dlc = get_can_dlc(ctrl & 0x0F);
+
+	flags =	priv->read_reg(priv, &priv->regs->ifregs[iface].arb2);
+	val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) |
+		(flags << 16);
+
+	if (flags & IF_ARB_MSGXTD)
+		frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG;
+	else
+		frame->can_id = (val >> 18) & CAN_SFF_MASK;
+
+	if (flags & IF_ARB_TRANSMIT)
+		frame->can_id |= CAN_RTR_FLAG;
+	else {
+		for (i = 0; i < frame->can_dlc; i += 2) {
+			data = priv->read_reg(priv,
+				&priv->regs->ifregs[iface].data[i / 2]);
+			frame->data[i] = data;
+			frame->data[i + 1] = data >> 8;
+		}
+	}
+
+	netif_receive_skb(skb);
+
+	stats->rx_packets++;
+	stats->rx_bytes += frame->can_dlc;
+
+	return 0;
+}
+
+static void c_can_setup_receive_object(struct net_device *dev, int iface,
+					int objno, unsigned int mask,
+					unsigned int id, unsigned int mcont)
+{
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
+			IFX_WRITE_LOW_16BIT(mask));
+	priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
+			IFX_WRITE_HIGH_16BIT(mask));
+
+	priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+			IFX_WRITE_LOW_16BIT(id));
+	priv->write_reg(priv, &priv->regs->ifregs[iface].arb2,
+			(IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
+
+	priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont);
+	c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
+
+	netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
+			c_can_read_reg32(priv, &priv->regs->msgval1));
+}
+
+static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
+{
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0);
+	priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0);
+	priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0);
+
+	c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
+
+	netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
+			c_can_read_reg32(priv, &priv->regs->msgval1));
+}
+
+static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
+{
+	int val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+
+	/*
+	 * the transmission request register's bit n-1 corresponds to
+	 * message object n, so account for that offset here.
+	 */
+	if (val & (1 << (objno - 1)))
+		return 1;
+
+	return 0;
+}
+
+static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
+					struct net_device *dev)
+{
+	u32 msg_obj_no;
+	struct c_can_priv *priv = netdev_priv(dev);
+	struct can_frame *frame = (struct can_frame *)skb->data;
+
+	if (can_dropped_invalid_skb(dev, skb))
+		return NETDEV_TX_OK;
+
+	msg_obj_no = get_tx_next_msg_obj(priv);
+
+	/* prepare message object for transmission */
+	c_can_write_msg_object(dev, 0, frame, msg_obj_no);
+	can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+
+	/*
+	 * we have to stop the queue in case of a wrap around or
+	 * if the next TX message object is still in use
+	 */
+	priv->tx_next++;
+	if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
+			(priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
+		netif_stop_queue(dev);
+
+	return NETDEV_TX_OK;
+}
+
+static int c_can_set_bittiming(struct net_device *dev)
+{
+	unsigned int reg_btr, reg_brpe, ctrl_save;
+	u8 brp, brpe, sjw, tseg1, tseg2;
+	u32 ten_bit_brp;
+	struct c_can_priv *priv = netdev_priv(dev);
+	const struct can_bittiming *bt = &priv->can.bittiming;
+
+	/* c_can provides a 6-bit brp and 4-bit brpe fields */
+	ten_bit_brp = bt->brp - 1;
+	brp = ten_bit_brp & BTR_BRP_MASK;
+	brpe = ten_bit_brp >> 6;
+
+	sjw = bt->sjw - 1;
+	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
+	tseg2 = bt->phase_seg2 - 1;
+	reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
+			(tseg2 << BTR_TSEG2_SHIFT);
+	reg_brpe = brpe & BRP_EXT_BRPE_MASK;
+
+	netdev_info(dev,
+		"setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
+
+	ctrl_save = priv->read_reg(priv, &priv->regs->control);
+	priv->write_reg(priv, &priv->regs->control,
+			ctrl_save | CONTROL_CCE | CONTROL_INIT);
+	priv->write_reg(priv, &priv->regs->btr, reg_btr);
+	priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe);
+	priv->write_reg(priv, &priv->regs->control, ctrl_save);
+
+	return 0;
+}
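+
+/*
+ * Editor's example (not part of the original patch): for bt->brp = 129,
+ * ten_bit_brp = 128, so BTR.BRP gets 128 & 0x3f = 0 and the BRP
+ * extension register gets 128 >> 6 = 2; together they encode the
+ * 10-bit value 128 (= bt->brp - 1).
+ */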
+
+/*
+ * Configure C_CAN message objects for Tx and Rx purposes:
+ * C_CAN provides a total of 32 message objects that can be configured
+ * either for Tx or Rx purposes. Here the first 16 message objects are used as
+ * a reception FIFO. The end of the reception FIFO is signified by the EoB
+ * bit being SET. The remaining 16 message objects are kept aside for Tx purposes.
+ * See user guide document for further details on configuring message
+ * objects.
+ */
+static void c_can_configure_msg_objects(struct net_device *dev)
+{
+	int i;
+
+	/* first invalidate all message objects */
+	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
+		c_can_inval_msg_object(dev, 0, i);
+
+	/* setup receive message objects */
+	for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
+		c_can_setup_receive_object(dev, 0, i, 0, 0,
+			(IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB);
+
+	c_can_setup_receive_object(dev, 0, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
+			IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
+}
+
+/*
+ * Configure C_CAN chip:
+ * - enable/disable auto-retransmission
+ * - set operating mode
+ * - configure message objects
+ */
+static void c_can_chip_config(struct net_device *dev)
+{
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+		/* disable automatic retransmission */
+		priv->write_reg(priv, &priv->regs->control,
+				CONTROL_DISABLE_AR);
+	else
+		/* enable automatic retransmission */
+		priv->write_reg(priv, &priv->regs->control,
+				CONTROL_ENABLE_AR);
+
+	if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
+	    (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
+		/* loopback + silent mode : useful for hot self-test */
+		priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+				CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+		priv->write_reg(priv, &priv->regs->test,
+				TEST_LBACK | TEST_SILENT);
+	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+		/* loopback mode : useful for self-test function */
+		priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+				CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+		priv->write_reg(priv, &priv->regs->test, TEST_LBACK);
+	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+		/* silent mode : bus-monitoring mode */
+		priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+				CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
+		priv->write_reg(priv, &priv->regs->test, TEST_SILENT);
+	} else
+		/* normal mode*/
+		priv->write_reg(priv, &priv->regs->control,
+				CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
+
+	/* configure message objects */
+	c_can_configure_msg_objects(dev);
+
+	/* set a `lec` value so that we can check for updates later */
+	priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+
+	/* set bittiming params */
+	c_can_set_bittiming(dev);
+}
+
+static void c_can_start(struct net_device *dev)
+{
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	/* enable status change, error and module interrupts */
+	c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+
+	/* basic c_can configuration */
+	c_can_chip_config(dev);
+
+	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+	/* reset tx helper pointers */
+	priv->tx_next = priv->tx_echo = 0;
+}
+
+static void c_can_stop(struct net_device *dev)
+{
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	/* disable all interrupts */
+	c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+
+	/* set the state as STOPPED */
+	priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
+{
+	switch (mode) {
+	case CAN_MODE_START:
+		c_can_start(dev);
+		netif_wake_queue(dev);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int c_can_get_berr_counter(const struct net_device *dev,
+					struct can_berr_counter *bec)
+{
+	unsigned int reg_err_counter;
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+	bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
+				ERR_CNT_REC_SHIFT;
+	bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
+
+	return 0;
+}
+
+/*
+ * theory of operation:
+ *
+ * priv->tx_echo holds the number of the oldest can_frame put for
+ * transmission into the hardware, but not yet ACKed by the CAN tx
+ * complete IRQ.
+ *
+ * We iterate from priv->tx_echo to priv->tx_next; each packet that has
+ * been transmitted is echoed back to the CAN framework. If we discover
+ * a not yet transmitted packet, we stop looking for more.
+ */
+static void c_can_do_tx(struct net_device *dev)
+{
+	u32 val;
+	u32 msg_obj_no;
+	struct c_can_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+
+	for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
+		msg_obj_no = get_tx_echo_msg_obj(priv);
+		c_can_inval_msg_object(dev, 0, msg_obj_no);
+		val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+		if (!(val & (1 << msg_obj_no))) {
+			can_get_echo_skb(dev,
+					msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
+			stats->tx_bytes += priv->read_reg(priv,
+					&priv->regs->ifregs[0].msg_cntrl)
+					& IF_MCONT_DLC_MASK;
+			stats->tx_packets++;
+		}
+	}
+
+	/* restart queue if wrap-up or if queue stalled on last pkt */
+	if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
+			((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
+		netif_wake_queue(dev);
+}
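+
+/*
+ * Editor's note (not part of the original patch): the frames handed to
+ * the hardware but not yet acknowledged occupy the objects mapped from
+ * counter values tx_echo .. tx_next - 1; e.g. tx_echo = 5 and
+ * tx_next = 8 means objects 22, 23 and 24 still wait for their TX
+ * acknowledgement.
+ */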
+
+/*
+ * theory of operation:
+ *
+ * The c_can core saves a received CAN message into the first message
+ * object it finds free (starting with the lowest). Bits NEWDAT and
+ * INTPND are set for this message object, indicating that a new message
+ * has arrived. To work around the resulting ordering problem, we keep
+ * two groups of message objects whose partitioning is defined by
+ * C_CAN_MSG_OBJ_RX_SPLIT.
+ *
+ * To ensure in-order frame reception we use the following
+ * approach while re-activating a message object to receive further
+ * frames:
+ * - if the current message object number is lower than
+ *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
+ *   the INTPND bit.
+ * - if the current message object number is equal to
+ *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
+ *   receive message objects.
+ * - if the current message object number is greater than
+ *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
+ *   only this message object.
+ */
+static int c_can_do_rx_poll(struct net_device *dev, int quota)
+{
+	u32 num_rx_pkts = 0;
+	unsigned int msg_obj, msg_ctrl_save;
+	struct c_can_priv *priv = netdev_priv(dev);
+	u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1);
+
+	for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
+			msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
+			val = c_can_read_reg32(priv, &priv->regs->intpnd1),
+			msg_obj++) {
+		/*
+		 * the interrupt pending register's bit n-1 corresponds to
+		 * message object n, so account for that offset here.
+		 */
+		if (val & (1 << (msg_obj - 1))) {
+			c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
+					~IF_COMM_TXRQST);
+			msg_ctrl_save = priv->read_reg(priv,
+					&priv->regs->ifregs[0].msg_cntrl);
+
+			if (msg_ctrl_save & IF_MCONT_EOB)
+				return num_rx_pkts;
+
+			if (msg_ctrl_save & IF_MCONT_MSGLST) {
+				c_can_handle_lost_msg_obj(dev, 0, msg_obj);
+				num_rx_pkts++;
+				quota--;
+				continue;
+			}
+
+			if (!(msg_ctrl_save & IF_MCONT_NEWDAT))
+				continue;
+
+			/* read the data from the message object */
+			c_can_read_msg_object(dev, 0, msg_ctrl_save);
+
+			if (msg_obj < C_CAN_MSG_RX_LOW_LAST)
+				c_can_mark_rx_msg_obj(dev, 0,
+						msg_ctrl_save, msg_obj);
+			else if (msg_obj > C_CAN_MSG_RX_LOW_LAST)
+				/* activate this msg obj */
+				c_can_activate_rx_msg_obj(dev, 0,
+						msg_ctrl_save, msg_obj);
+			else if (msg_obj == C_CAN_MSG_RX_LOW_LAST)
+				/* activate all lower message objects */
+				c_can_activate_all_lower_rx_msg_obj(dev,
+						0, msg_ctrl_save);
+
+			num_rx_pkts++;
+			quota--;
+		}
+	}
+
+	return num_rx_pkts;
+}
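+
+/*
+ * Editor's note (not part of the original patch): with
+ * C_CAN_MSG_OBJ_RX_SPLIT = 9 the lower group consists of message
+ * objects 1..8 (C_CAN_MSG_RX_LOW_LAST = 8) and the upper group of
+ * objects 9..16; object 16 is the one configured with the EoB bit set
+ * in c_can_configure_msg_objects().
+ */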
+
+static inline int c_can_has_and_handle_berr(struct c_can_priv *priv)
+{
+	return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
+		(priv->current_status & LEC_UNUSED);
+}
+
+static int c_can_handle_state_change(struct net_device *dev,
+				enum c_can_bus_error_types error_type)
+{
+	unsigned int reg_err_counter;
+	unsigned int rx_err_passive;
+	struct c_can_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	struct can_berr_counter bec;
+
+	/* propagate the error condition to the CAN stack */
+	skb = alloc_can_err_skb(dev, &cf);
+	if (unlikely(!skb))
+		return 0;
+
+	c_can_get_berr_counter(dev, &bec);
+	reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+	rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
+				ERR_CNT_RP_SHIFT;
+
+	switch (error_type) {
+	case C_CAN_ERROR_WARNING:
+		/* error warning state */
+		priv->can.can_stats.error_warning++;
+		priv->can.state = CAN_STATE_ERROR_WARNING;
+		cf->can_id |= CAN_ERR_CRTL;
+		cf->data[1] = (bec.txerr > bec.rxerr) ?
+			CAN_ERR_CRTL_TX_WARNING :
+			CAN_ERR_CRTL_RX_WARNING;
+		cf->data[6] = bec.txerr;
+		cf->data[7] = bec.rxerr;
+
+		break;
+	case C_CAN_ERROR_PASSIVE:
+		/* error passive state */
+		priv->can.can_stats.error_passive++;
+		priv->can.state = CAN_STATE_ERROR_PASSIVE;
+		cf->can_id |= CAN_ERR_CRTL;
+		if (rx_err_passive)
+			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+		if (bec.txerr > 127)
+			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+
+		cf->data[6] = bec.txerr;
+		cf->data[7] = bec.rxerr;
+		break;
+	case C_CAN_BUS_OFF:
+		/* bus-off state */
+		priv->can.state = CAN_STATE_BUS_OFF;
+		cf->can_id |= CAN_ERR_BUSOFF;
+		/*
+		 * disable all interrupts in bus-off mode to ensure that
+		 * the CPU is not hogged down
+		 */
+		c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+		can_bus_off(dev);
+		break;
+	default:
+		break;
+	}
+
+	netif_receive_skb(skb);
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+
+	return 1;
+}
+
+static int c_can_handle_bus_err(struct net_device *dev,
+				enum c_can_lec_type lec_type)
+{
+	struct c_can_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+
+	/*
+	 * early exit if no lec update or no error.
+	 * no lec update means that no CAN bus event has been detected
+	 * since CPU wrote 0x7 value to status reg.
+	 */
+	if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
+		return 0;
+
+	/* propagate the error condition to the CAN stack */
+	skb = alloc_can_err_skb(dev, &cf);
+	if (unlikely(!skb))
+		return 0;
+
+	/*
+	 * check for 'last error code' which tells us the
+	 * type of the last error to occur on the CAN bus
+	 */
+
+	/* common for all type of bus errors */
+	priv->can.can_stats.bus_error++;
+	stats->rx_errors++;
+	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+	cf->data[2] |= CAN_ERR_PROT_UNSPEC;
+
+	switch (lec_type) {
+	case LEC_STUFF_ERROR:
+		netdev_dbg(dev, "stuff error\n");
+		cf->data[2] |= CAN_ERR_PROT_STUFF;
+		break;
+	case LEC_FORM_ERROR:
+		netdev_dbg(dev, "form error\n");
+		cf->data[2] |= CAN_ERR_PROT_FORM;
+		break;
+	case LEC_ACK_ERROR:
+		netdev_dbg(dev, "ack error\n");
+		cf->data[2] |= (CAN_ERR_PROT_LOC_ACK |
+				CAN_ERR_PROT_LOC_ACK_DEL);
+		break;
+	case LEC_BIT1_ERROR:
+		netdev_dbg(dev, "bit1 error\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT1;
+		break;
+	case LEC_BIT0_ERROR:
+		netdev_dbg(dev, "bit0 error\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT0;
+		break;
+	case LEC_CRC_ERROR:
+		netdev_dbg(dev, "CRC error\n");
+		cf->data[2] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
+				CAN_ERR_PROT_LOC_CRC_DEL);
+		break;
+	default:
+		break;
+	}
+
+	/* set a `lec` value so that we can check for updates later */
+	priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+
+	netif_receive_skb(skb);
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+
+	return 1;
+}
+
+static int c_can_poll(struct napi_struct *napi, int quota)
+{
+	u16 irqstatus;
+	int lec_type = 0;
+	int work_done = 0;
+	struct net_device *dev = napi->dev;
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+	if (!irqstatus)
+		goto end;
+
+	/* status events have the highest priority */
+	if (irqstatus == STATUS_INTERRUPT) {
+		priv->current_status = priv->read_reg(priv,
+					&priv->regs->status);
+
+		/* handle Tx/Rx events */
+		if (priv->current_status & STATUS_TXOK)
+			priv->write_reg(priv, &priv->regs->status,
+					priv->current_status & ~STATUS_TXOK);
+
+		if (priv->current_status & STATUS_RXOK)
+			priv->write_reg(priv, &priv->regs->status,
+					priv->current_status & ~STATUS_RXOK);
+
+		/* handle state changes */
+		if ((priv->current_status & STATUS_EWARN) &&
+				(!(priv->last_status & STATUS_EWARN))) {
+			netdev_dbg(dev, "entered error warning state\n");
+			work_done += c_can_handle_state_change(dev,
+						C_CAN_ERROR_WARNING);
+		}
+		if ((priv->current_status & STATUS_EPASS) &&
+				(!(priv->last_status & STATUS_EPASS))) {
+			netdev_dbg(dev, "entered error passive state\n");
+			work_done += c_can_handle_state_change(dev,
+						C_CAN_ERROR_PASSIVE);
+		}
+		if ((priv->current_status & STATUS_BOFF) &&
+				(!(priv->last_status & STATUS_BOFF))) {
+			netdev_dbg(dev, "entered bus off state\n");
+			work_done += c_can_handle_state_change(dev,
+						C_CAN_BUS_OFF);
+		}
+
+		/* handle bus recovery events */
+		if ((!(priv->current_status & STATUS_BOFF)) &&
+				(priv->last_status & STATUS_BOFF)) {
+			netdev_dbg(dev, "left bus off state\n");
+			priv->can.state = CAN_STATE_ERROR_ACTIVE;
+		}
+		if ((!(priv->current_status & STATUS_EPASS)) &&
+				(priv->last_status & STATUS_EPASS)) {
+			netdev_dbg(dev, "left error passive state\n");
+			priv->can.state = CAN_STATE_ERROR_ACTIVE;
+		}
+
+		priv->last_status = priv->current_status;
+
+		/* handle lec errors on the bus */
+		lec_type = c_can_has_and_handle_berr(priv);
+		if (lec_type)
+			work_done += c_can_handle_bus_err(dev, lec_type);
+	} else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) &&
+			(irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) {
+		/* handle events corresponding to receive message objects */
+		work_done += c_can_do_rx_poll(dev, (quota - work_done));
+	} else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) &&
+			(irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) {
+		/* handle events corresponding to transmit message objects */
+		c_can_do_tx(dev);
+	}
+
+end:
+	if (work_done < quota) {
+		napi_complete(napi);
+		/* enable all IRQs */
+		c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS);
+	}
+
+	return work_done;
+}
+
+static irqreturn_t c_can_isr(int irq, void *dev_id)
+{
+	u16 irqstatus;
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+	if (!irqstatus)
+		return IRQ_NONE;
+
+	/* disable all interrupts and schedule the NAPI */
+	c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+	napi_schedule(&priv->napi);
+
+	return IRQ_HANDLED;
+}
+
+static int c_can_open(struct net_device *dev)
+{
+	int err;
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	/* open the can device */
+	err = open_candev(dev);
+	if (err) {
+		netdev_err(dev, "failed to open can device\n");
+		return err;
+	}
+
+	/* register interrupt handler */
+	err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
+				dev);
+	if (err < 0) {
+		netdev_err(dev, "failed to request interrupt\n");
+		goto exit_irq_fail;
+	}
+
+	/* start the c_can controller */
+	c_can_start(dev);
+
+	napi_enable(&priv->napi);
+	netif_start_queue(dev);
+
+	return 0;
+
+exit_irq_fail:
+	close_candev(dev);
+	return err;
+}
+
+static int c_can_close(struct net_device *dev)
+{
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	napi_disable(&priv->napi);
+	c_can_stop(dev);
+	free_irq(dev->irq, dev);
+	close_candev(dev);
+
+	return 0;
+}
+
+struct net_device *alloc_c_can_dev(void)
+{
+	struct net_device *dev;
+	struct c_can_priv *priv;
+
+	dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
+	if (!dev)
+		return NULL;
+
+	priv = netdev_priv(dev);
+	netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
+
+	priv->dev = dev;
+	priv->can.bittiming_const = &c_can_bittiming_const;
+	priv->can.do_set_mode = c_can_set_mode;
+	priv->can.do_get_berr_counter = c_can_get_berr_counter;
+	priv->can.ctrlmode_supported = CAN_CTRLMODE_ONE_SHOT |
+					CAN_CTRLMODE_LOOPBACK |
+					CAN_CTRLMODE_LISTENONLY |
+					CAN_CTRLMODE_BERR_REPORTING;
+
+	return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_c_can_dev);
+
+void free_c_can_dev(struct net_device *dev)
+{
+	free_candev(dev);
+}
+EXPORT_SYMBOL_GPL(free_c_can_dev);
+
+static const struct net_device_ops c_can_netdev_ops = {
+	.ndo_open = c_can_open,
+	.ndo_stop = c_can_close,
+	.ndo_start_xmit = c_can_start_xmit,
+};
+
+int register_c_can_dev(struct net_device *dev)
+{
+	dev->flags |= IFF_ECHO;	/* we support local echo */
+	dev->netdev_ops = &c_can_netdev_ops;
+
+	return register_candev(dev);
+}
+EXPORT_SYMBOL_GPL(register_c_can_dev);
+
+void unregister_c_can_dev(struct net_device *dev)
+{
+	struct c_can_priv *priv = netdev_priv(dev);
+
+	/* disable all interrupts */
+	c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS);
+
+	unregister_candev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_c_can_dev);
+
+MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
new file mode 100644
index 0000000..9b7fbef
--- /dev/null
+++ b/drivers/net/can/c_can/c_can.h
@@ -0,0 +1,86 @@
+/*
+ * CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B.
+ * Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef C_CAN_H
+#define C_CAN_H
+
+/* c_can IF registers */
+struct c_can_if_regs {
+	u16 com_req;
+	u16 com_mask;
+	u16 mask1;
+	u16 mask2;
+	u16 arb1;
+	u16 arb2;
+	u16 msg_cntrl;
+	u16 data[4];
+	u16 _reserved[13];
+};
+
+/* c_can hardware registers */
+struct c_can_regs {
+	u16 control;
+	u16 status;
+	u16 err_cnt;
+	u16 btr;
+	u16 interrupt;
+	u16 test;
+	u16 brp_ext;
+	u16 _reserved1;
+	struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */
+	u16 _reserved2[8];
+	u16 txrqst1;
+	u16 txrqst2;
+	u16 _reserved3[6];
+	u16 newdat1;
+	u16 newdat2;
+	u16 _reserved4[6];
+	u16 intpnd1;
+	u16 intpnd2;
+	u16 _reserved5[6];
+	u16 msgval1;
+	u16 msgval2;
+	u16 _reserved6[6];
+};
+
+/* c_can private data structure */
+struct c_can_priv {
+	struct can_priv can;	/* must be the first member */
+	struct napi_struct napi;
+	struct net_device *dev;
+	int tx_object;
+	int current_status;
+	int last_status;
+	u16 (*read_reg) (struct c_can_priv *priv, void *reg);
+	void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val);
+	struct c_can_regs __iomem *regs;
+	unsigned long irq_flags; /* for request_irq() */
+	unsigned int tx_next;
+	unsigned int tx_echo;
+	void *priv;		/* for board-specific data */
+};
+
+struct net_device *alloc_c_can_dev(void);
+void free_c_can_dev(struct net_device *dev);
+int register_c_can_dev(struct net_device *dev);
+void unregister_c_can_dev(struct net_device *dev);
+
+#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
new file mode 100644
index 0000000..e629b96
--- /dev/null
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -0,0 +1,215 @@
+/*
+ * Platform CAN bus driver for Bosch C_CAN controller
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Bhupesh Sharma <bhupesh.sharma@st.com>
+ *
+ * Borrowed heavily from the C_CAN driver originally written by:
+ * Copyright (C) 2007
+ * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
+ * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
+ *
+ * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B.
+ * Bosch C_CAN user manual can be obtained from:
+ * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
+ * users_manual_c_can.pdf
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <linux/can/dev.h>
+
+#include "c_can.h"
+
+/*
+ * 16-bit c_can registers can be arranged differently in the memory
+ * architecture of different implementations. For example: 16-bit
+ * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
+ * Handle the same by providing a common read/write interface.
+ */
+static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+						void *reg)
+{
+	return readw(reg);
+}
+
+static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+						void *reg, u16 val)
+{
+	writew(val, reg);
+}
+
+static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+						void *reg)
+{
+	return readw(reg + (long)reg - (long)priv->regs);
+}
+
+static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+						void *reg, u16 val)
+{
+	writew(val, reg + (long)reg - (long)priv->regs);
+}
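+
+/*
+ * Editor's note (not part of the original patch): in the two
+ * 32-bit-aligned accessors above, (long)reg - (long)priv->regs is the
+ * register offset within the 16-bit-spaced struct c_can_regs layout;
+ * adding that offset to reg once more doubles it, which yields the
+ * location of the same register in a 32-bit-aligned register map.
+ */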
+
+static int __devinit c_can_plat_probe(struct platform_device *pdev)
+{
+	int ret;
+	void __iomem *addr;
+	struct net_device *dev;
+	struct c_can_priv *priv;
+	struct resource *mem, *irq;
+#ifdef CONFIG_HAVE_CLK
+	struct clk *clk;
+
+	/* get the appropriate clk */
+	clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "no clock defined\n");
+		ret = -ENODEV;
+		goto exit;
+	}
+#endif
+
+	/* get the platform data */
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!mem || (irq <= 0)) {
+		ret = -ENODEV;
+		goto exit_free_clk;
+	}
+
+	if (!request_mem_region(mem->start, resource_size(mem),
+				KBUILD_MODNAME)) {
+		dev_err(&pdev->dev, "resource unavailable\n");
+		ret = -ENODEV;
+		goto exit_free_clk;
+	}
+
+	addr = ioremap(mem->start, resource_size(mem));
+	if (!addr) {
+		dev_err(&pdev->dev, "failed to map can port\n");
+		ret = -ENOMEM;
+		goto exit_release_mem;
+	}
+
+	/* allocate the c_can device */
+	dev = alloc_c_can_dev();
+	if (!dev) {
+		ret = -ENOMEM;
+		goto exit_iounmap;
+	}
+
+	priv = netdev_priv(dev);
+
+	dev->irq = irq->start;
+	priv->regs = addr;
+#ifdef CONFIG_HAVE_CLK
+	priv->can.clock.freq = clk_get_rate(clk);
+	priv->priv = clk;
+#endif
+
+	switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+	case IORESOURCE_MEM_32BIT:
+		priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
+		priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
+		break;
+	case IORESOURCE_MEM_16BIT:
+	default:
+		priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
+		priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+		break;
+	}
+
+	platform_set_drvdata(pdev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	ret = register_c_can_dev(dev);
+	if (ret) {
+		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+			KBUILD_MODNAME, ret);
+		goto exit_free_device;
+	}
+
+	dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
+		 KBUILD_MODNAME, priv->regs, dev->irq);
+	return 0;
+
+exit_free_device:
+	platform_set_drvdata(pdev, NULL);
+	free_c_can_dev(dev);
+exit_iounmap:
+	iounmap(addr);
+exit_release_mem:
+	release_mem_region(mem->start, resource_size(mem));
+exit_free_clk:
+#ifdef CONFIG_HAVE_CLK
+	clk_put(clk);
+exit:
+#endif
+	dev_err(&pdev->dev, "probe failed\n");
+
+	return ret;
+}
+
+static int __devexit c_can_plat_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct c_can_priv *priv = netdev_priv(dev);
+	struct resource *mem;
+
+	unregister_c_can_dev(dev);
+	platform_set_drvdata(pdev, NULL);
+
+	free_c_can_dev(dev);
+	iounmap(priv->regs);
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(mem->start, resource_size(mem));
+
+#ifdef CONFIG_HAVE_CLK
+	clk_put(priv->priv);
+#endif
+
+	return 0;
+}
+
+static struct platform_driver c_can_plat_driver = {
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.owner = THIS_MODULE,
+	},
+	.probe = c_can_plat_probe,
+	.remove = __devexit_p(c_can_plat_remove),
+};
+
+static int __init c_can_plat_init(void)
+{
+	return platform_driver_register(&c_can_plat_driver);
+}
+module_init(c_can_plat_init);
+
+static void __exit c_can_plat_exit(void)
+{
+	platform_driver_unregister(&c_can_plat_driver);
+}
+module_exit(c_can_plat_exit);
+
+MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Platform CAN bus driver for Bosch C_CAN controller");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index b9a6d7a..366f5cc 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1618,7 +1618,7 @@
 	return count;
 }
 
-static DEVICE_ATTR(termination, S_IWUGO | S_IRUGO, ican3_sysfs_show_term,
+static DEVICE_ATTR(termination, S_IWUSR | S_IRUGO, ican3_sysfs_show_term,
 						   ican3_sysfs_set_term);
 
 static struct attribute *ican3_sysfs_attrs[] = {
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7ab534a..7513c45 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -940,7 +940,7 @@
 		goto open_unlock;
 	}
 
-	priv->wq = create_freezeable_workqueue("mcp251x_wq");
+	priv->wq = create_freezable_workqueue("mcp251x_wq");
 	INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
 	INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
 
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index 27d1d39..d387069 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -1,5 +1,5 @@
 config CAN_MSCAN
-	depends on CAN_DEV && (PPC || M68K || M68KNOMMU)
+	depends on CAN_DEV && (PPC || M68K)
 	tristate "Support for Freescale MSCAN based chips"
 	---help---
 	  The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index c42e972..e54712b 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -185,7 +185,7 @@
 
 static struct can_bittiming_const pch_can_bittiming_const = {
 	.name = KBUILD_MODNAME,
-	.tseg1_min = 1,
+	.tseg1_min = 2,
 	.tseg1_max = 16,
 	.tseg2_min = 1,
 	.tseg2_max = 8,
@@ -959,13 +959,13 @@
 	struct pch_can_priv *priv = netdev_priv(ndev);
 
 	unregister_candev(priv->ndev);
-	pci_iounmap(pdev, priv->regs);
 	if (priv->use_msi)
 		pci_disable_msi(priv->dev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
 	pch_can_reset(priv);
+	pci_iounmap(pdev, priv->regs);
 	free_candev(priv->ndev);
 }
 
@@ -1238,6 +1238,7 @@
 		priv->use_msi = 0;
 	} else {
 		netdev_err(ndev, "PCH CAN opened with MSI\n");
+		pci_set_master(pdev);
 		priv->use_msi = 1;
 	}
 
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
new file mode 100644
index 0000000..5de46a9
--- /dev/null
+++ b/drivers/net/can/softing/Kconfig
@@ -0,0 +1,30 @@
+config CAN_SOFTING
+	tristate "Softing Gmbh CAN generic support"
+	depends on CAN_DEV && HAS_IOMEM
+	---help---
+	  Support for CAN cards from Softing Gmbh & some cards
+	  from Vector Gmbh.
+	  Softing Gmbh CAN cards come with 1 or 2 physical busses.
+	  Those cards typically use Dual Port RAM to communicate
+	  with the host CPU. The interface is then identical for PCI
+	  and PCMCIA cards. This driver operates on a platform device,
+	  which has been created by softing_cs or softing_pci driver.
+	  Warning:
+	  The API of the card does not allow fine control per bus, but
+	  controls the 2 busses on the card together.
+	  As such, some actions (start/stop/busoff recovery) on one bus
+	  must temporarily bring down the other bus as well.
+
+config CAN_SOFTING_CS
+	tristate "Softing Gmbh CAN pcmcia cards"
+	depends on PCMCIA
+	depends on CAN_SOFTING
+	---help---
+	  Support for PCMCIA cards from Softing Gmbh & some cards
+	  from Vector Gmbh.
+	  You need firmware for these, which you can get at
+	  http://developer.berlios.de/projects/socketcan/
+	  This version of the driver is written against
+	  firmware version 4.6 (softing-fw-4.6-binaries.tar.gz).
+	  In order to use the card as a CAN device, you also need the
+	  Softing generic support.
diff --git a/drivers/net/can/softing/Makefile b/drivers/net/can/softing/Makefile
new file mode 100644
index 0000000..c5e5016
--- /dev/null
+++ b/drivers/net/can/softing/Makefile
@@ -0,0 +1,6 @@
+
+softing-y := softing_main.o softing_fw.o
+obj-$(CONFIG_CAN_SOFTING) += softing.o
+obj-$(CONFIG_CAN_SOFTING_CS) += softing_cs.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
new file mode 100644
index 0000000..7ec9f4d
--- /dev/null
+++ b/drivers/net/can/softing/softing.h
@@ -0,0 +1,167 @@
+/*
+ * softing common interfaces
+ *
+ * by Kurt Van Dijck, 2008-2010
+ */
+
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+
+#include "softing_platform.h"
+
+struct softing;
+
+struct softing_priv {
+	struct can_priv can; /* must be the first member! */
+	struct net_device *netdev;
+	struct softing *card;
+	struct {
+		int pending;
+		/* variables which hold the circular buffer */
+		int echo_put;
+		int echo_get;
+	} tx;
+	struct can_bittiming_const btr_const;
+	int index;
+	uint8_t output;
+	uint16_t chip;
+};
+#define netdev2softing(netdev)	((struct softing_priv *)netdev_priv(netdev))
+
+struct softing {
+	const struct softing_platform_data *pdat;
+	struct platform_device *pdev;
+	struct net_device *net[2];
+	spinlock_t spin; /* protect this structure & DPRAM access */
+	ktime_t ts_ref;
+	ktime_t ts_overflow; /* timestamp overflow value, in ktime */
+
+	struct {
+		/* indication of firmware status */
+		int up;
+		/* protection of the 'up' variable */
+		struct mutex lock;
+	} fw;
+	struct {
+		int nr;
+		int requested;
+		int svc_count;
+		unsigned int dpram_position;
+	} irq;
+	struct {
+		int pending;
+		int last_bus;
+		/*
+		 * keep the bus that last tx'd a message,
+		 * in order to let every netdev queue resume
+		 */
+	} tx;
+	__iomem uint8_t *dpram;
+	unsigned long dpram_phys;
+	unsigned long dpram_size;
+	struct {
+		uint16_t fw_version, hw_version, license, serial;
+		uint16_t chip[2];
+		unsigned int freq; /* remote cpu's operating frequency */
+	} id;
+};
+
+extern int softing_default_output(struct net_device *netdev);
+
+extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);
+
+extern int softing_chip_poweron(struct softing *card);
+
+extern int softing_bootloader_command(struct softing *card, int16_t cmd,
+		const char *msg);
+
+/* Load firmware after reset */
+extern int softing_load_fw(const char *file, struct softing *card,
+			__iomem uint8_t *virt, unsigned int size, int offset);
+
+/* Load final application firmware after bootloader */
+extern int softing_load_app_fw(const char *file, struct softing *card);
+
+/*
+ * enable or disable irq
+ * only called with fw.lock locked
+ */
+extern int softing_enable_irq(struct softing *card, int enable);
+
+/* start/stop 1 bus on card */
+extern int softing_startstop(struct net_device *netdev, int up);
+
+/* netif_rx() */
+extern int softing_netdev_rx(struct net_device *netdev,
+		const struct can_frame *msg, ktime_t ktime);
+
+/* SOFTING DPRAM mappings */
+#define DPRAM_RX		0x0000
+	#define DPRAM_RX_SIZE	32
+	#define DPRAM_RX_CNT	16
+#define DPRAM_RX_RD		0x0201	/* uint8_t */
+#define DPRAM_RX_WR		0x0205	/* uint8_t */
+#define DPRAM_RX_LOST		0x0207	/* uint8_t */
+
+#define DPRAM_FCT_PARAM		0x0300	/* int16_t [20] */
+#define DPRAM_FCT_RESULT	0x0328	/* int16_t */
+#define DPRAM_FCT_HOST		0x032b	/* uint16_t */
+
+#define DPRAM_INFO_BUSSTATE	0x0331	/* uint16_t */
+#define DPRAM_INFO_BUSSTATE2	0x0335	/* uint16_t */
+#define DPRAM_INFO_ERRSTATE	0x0339	/* uint16_t */
+#define DPRAM_INFO_ERRSTATE2	0x033d	/* uint16_t */
+#define DPRAM_RESET		0x0341	/* uint16_t */
+#define DPRAM_CLR_RECV_FIFO	0x0345	/* uint16_t */
+#define DPRAM_RESET_TIME	0x034d	/* uint16_t */
+#define DPRAM_TIME		0x0350	/* uint64_t */
+#define DPRAM_WR_START		0x0358	/* uint8_t */
+#define DPRAM_WR_END		0x0359	/* uint8_t */
+#define DPRAM_RESET_RX_FIFO	0x0361	/* uint16_t */
+#define DPRAM_RESET_TX_FIFO	0x0364	/* uint8_t */
+#define DPRAM_READ_FIFO_LEVEL	0x0365	/* uint8_t */
+#define DPRAM_RX_FIFO_LEVEL	0x0366	/* uint16_t */
+#define DPRAM_TX_FIFO_LEVEL	0x0366	/* uint16_t */
+
+#define DPRAM_TX		0x0400	/* uint16_t */
+	#define DPRAM_TX_SIZE	16
+	#define DPRAM_TX_CNT	32
+#define DPRAM_TX_RD		0x0601	/* uint8_t */
+#define DPRAM_TX_WR		0x0605	/* uint8_t */
+
+#define DPRAM_COMMAND		0x07e0	/* uint16_t */
+#define DPRAM_RECEIPT		0x07f0	/* uint16_t */
+#define DPRAM_IRQ_TOHOST	0x07fe	/* uint8_t */
+#define DPRAM_IRQ_TOCARD	0x07ff	/* uint8_t */
+
+#define DPRAM_V2_RESET		0x0e00	/* uint8_t */
+#define DPRAM_V2_IRQ_TOHOST	0x0e02	/* uint8_t */
+
+#define TXMAX	(DPRAM_TX_CNT - 1)
+
+/* DPRAM return codes */
+#define RES_NONE	0
+#define RES_OK		1
+#define RES_NOK		2
+#define RES_UNKNOWN	3
+/* DPRAM flags */
+#define CMD_TX		0x01
+#define CMD_ACK		0x02
+#define CMD_XTD		0x04
+#define CMD_RTR		0x08
+#define CMD_ERR		0x10
+#define CMD_BUS2	0x80
+
+/* returned fifo entry bus state masks */
+#define SF_MASK_BUSOFF		0x80
+#define SF_MASK_EPASSIVE	0x60
+
+/* bus states */
+#define STATE_BUSOFF	2
+#define STATE_EPASSIVE	1
+#define STATE_EACTIVE	0
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
new file mode 100644
index 0000000..c11bb4d
--- /dev/null
+++ b/drivers/net/can/softing/softing_cs.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include "softing_platform.h"
+
+static int softingcs_index;
+static spinlock_t softingcs_index_lock;
+
+static int softingcs_reset(struct platform_device *pdev, int v);
+static int softingcs_enable_irq(struct platform_device *pdev, int v);
+
+/*
+ * platform_data descriptions
+ */
+#define MHZ (1000*1000)
+static const struct softing_platform_data softingcs_platform_data[] = {
+{
+	.name = "CANcard",
+	.manf = 0x0168, .prod = 0x001,
+	.generation = 1,
+	.nbus = 2,
+	.freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
+	.dpram_size = 0x0800,
+	.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = softingcs_enable_irq,
+}, {
+	.name = "CANcard-NEC",
+	.manf = 0x0168, .prod = 0x002,
+	.generation = 1,
+	.nbus = 2,
+	.freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4,
+	.dpram_size = 0x0800,
+	.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = softingcs_enable_irq,
+}, {
+	.name = "CANcard-SJA",
+	.manf = 0x0168, .prod = 0x004,
+	.generation = 1,
+	.nbus = 2,
+	.freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
+	.dpram_size = 0x0800,
+	.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = softingcs_enable_irq,
+}, {
+	.name = "CANcard-2",
+	.manf = 0x0168, .prod = 0x005,
+	.generation = 2,
+	.nbus = 2,
+	.freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+	.dpram_size = 0x1000,
+	.boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = NULL,
+}, {
+	.name = "Vector-CANcard",
+	.manf = 0x0168, .prod = 0x081,
+	.generation = 1,
+	.nbus = 2,
+	.freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
+	.dpram_size = 0x0800,
+	.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = softingcs_enable_irq,
+}, {
+	.name = "Vector-CANcard-SJA",
+	.manf = 0x0168, .prod = 0x084,
+	.generation = 1,
+	.nbus = 2,
+	.freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4,
+	.dpram_size = 0x0800,
+	.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cansja.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = softingcs_enable_irq,
+}, {
+	.name = "Vector-CANcard-2",
+	.manf = 0x0168, .prod = 0x085,
+	.generation = 2,
+	.nbus = 2,
+	.freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+	.dpram_size = 0x1000,
+	.boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = NULL,
+}, {
+	.name = "EDICcard-NEC",
+	.manf = 0x0168, .prod = 0x102,
+	.generation = 1,
+	.nbus = 2,
+	.freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4,
+	.dpram_size = 0x0800,
+	.boot = {0x0000, 0x000000, fw_dir "bcard.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cancard.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = softingcs_enable_irq,
+}, {
+	.name = "EDICcard-2",
+	.manf = 0x0168, .prod = 0x105,
+	.generation = 2,
+	.nbus = 2,
+	.freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4,
+	.dpram_size = 0x1000,
+	.boot = {0x0000, 0x000000, fw_dir "bcard2.bin",},
+	.load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",},
+	.app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",},
+	.reset = softingcs_reset,
+	.enable_irq = NULL,
+}, {
+	0, 0,
+},
+};
+
+MODULE_FIRMWARE(fw_dir "bcard.bin");
+MODULE_FIRMWARE(fw_dir "ldcard.bin");
+MODULE_FIRMWARE(fw_dir "cancard.bin");
+MODULE_FIRMWARE(fw_dir "cansja.bin");
+
+MODULE_FIRMWARE(fw_dir "bcard2.bin");
+MODULE_FIRMWARE(fw_dir "ldcard2.bin");
+MODULE_FIRMWARE(fw_dir "cancrd2.bin");
+
+static __devinit const struct softing_platform_data
+*softingcs_find_platform_data(unsigned int manf, unsigned int prod)
+{
+	const struct softing_platform_data *lp;
+
+	for (lp = softingcs_platform_data; lp->manf; ++lp) {
+		if ((lp->manf == manf) && (lp->prod == prod))
+			return lp;
+	}
+	return NULL;
+}
+
+/*
+ * platformdata callbacks
+ */
+static int softingcs_reset(struct platform_device *pdev, int v)
+{
+	struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
+
+	dev_dbg(&pdev->dev, "pcmcia config [2] %02x\n", v ? 0 : 0x20);
+	return pcmcia_write_config_byte(pcmcia, 2, v ? 0 : 0x20);
+}
+
+static int softingcs_enable_irq(struct platform_device *pdev, int v)
+{
+	struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent);
+
+	dev_dbg(&pdev->dev, "pcmcia config [0] %02x\n", v ? 0x60 : 0);
+	return pcmcia_write_config_byte(pcmcia, 0, v ? 0x60 : 0);
+}
+
+/*
+ * pcmcia check
+ */
+static __devinit int softingcs_probe_config(struct pcmcia_device *pcmcia,
+		void *priv_data)
+{
+	struct softing_platform_data *pdat = priv_data;
+	struct resource *pres;
+	int memspeed = 0;
+
+	WARN_ON(!pdat);
+	pres = pcmcia->resource[PCMCIA_IOMEM_0];
+	if (resource_size(pres) < 0x1000)
+		return -ERANGE;
+
+	pres->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE;
+	if (pdat->generation < 2) {
+		pres->flags |= WIN_USE_WAIT | WIN_DATA_WIDTH_8;
+		memspeed = 3;
+	} else {
+		pres->flags |= WIN_DATA_WIDTH_16;
+	}
+	return pcmcia_request_window(pcmcia, pres, memspeed);
+}
+
+static __devexit void softingcs_remove(struct pcmcia_device *pcmcia)
+{
+	struct platform_device *pdev = pcmcia->priv;
+
+	/* free bits */
+	platform_device_unregister(pdev);
+	/* release pcmcia stuff */
+	pcmcia_disable_device(pcmcia);
+}
+
+/*
+ * platform_device wrapper
+ * pdev->resource has 2 entries: io & irq
+ */
+static void softingcs_pdev_release(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	kfree(pdev);
+}
+
+static __devinit int softingcs_probe(struct pcmcia_device *pcmcia)
+{
+	int ret;
+	struct platform_device *pdev;
+	const struct softing_platform_data *pdat;
+	struct resource *pres;
+	struct dev {
+		struct platform_device pdev;
+		struct resource res[2];
+	} *dev;
+
+	/* find matching platform_data */
+	pdat = softingcs_find_platform_data(pcmcia->manf_id, pcmcia->card_id);
+	if (!pdat)
+		return -ENOTTY;
+
+	/* setup pcmcia device */
+	pcmcia->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IOMEM |
+		CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC;
+	ret = pcmcia_loop_config(pcmcia, softingcs_probe_config, (void *)pdat);
+	if (ret)
+		goto pcmcia_failed;
+
+	ret = pcmcia_enable_device(pcmcia);
+	if (ret < 0)
+		goto pcmcia_failed;
+
+	pres = pcmcia->resource[PCMCIA_IOMEM_0];
+	if (!pres) {
+		ret = -EBADF;
+		goto pcmcia_bad;
+	}
+
+	/* create softing platform device */
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		ret = -ENOMEM;
+		goto mem_failed;
+	}
+	dev->pdev.resource = dev->res;
+	dev->pdev.num_resources = ARRAY_SIZE(dev->res);
+	dev->pdev.dev.release = softingcs_pdev_release;
+
+	pdev = &dev->pdev;
+	pdev->dev.platform_data = (void *)pdat;
+	pdev->dev.parent = &pcmcia->dev;
+	pcmcia->priv = pdev;
+
+	/* platform device resources */
+	pdev->resource[0].flags = IORESOURCE_MEM;
+	pdev->resource[0].start = pres->start;
+	pdev->resource[0].end = pres->end;
+
+	pdev->resource[1].flags = IORESOURCE_IRQ;
+	pdev->resource[1].start = pcmcia->irq;
+	pdev->resource[1].end = pdev->resource[1].start;
+
+	/* platform device setup */
+	spin_lock(&softingcs_index_lock);
+	pdev->id = softingcs_index++;
+	spin_unlock(&softingcs_index_lock);
+	pdev->name = "softing";
+	dev_set_name(&pdev->dev, "softingcs.%i", pdev->id);
+	ret = platform_device_register(pdev);
+	if (ret < 0)
+		goto platform_failed;
+
+	dev_info(&pcmcia->dev, "created %s\n", dev_name(&pdev->dev));
+	return 0;
+
+platform_failed:
+	kfree(dev);
+mem_failed:
+pcmcia_bad:
+pcmcia_failed:
+	pcmcia_disable_device(pcmcia);
+	pcmcia->priv = NULL;
+	return ret ?: -ENODEV;
+}
+
+static /*const*/ struct pcmcia_device_id softingcs_ids[] = {
+	/* softing */
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001),
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002),
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0004),
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0005),
+	/* vector, manufacturer? */
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0081),
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0084),
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0085),
+	/* EDIC */
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0102),
+	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0105),
+	PCMCIA_DEVICE_NULL,
+};
+
+MODULE_DEVICE_TABLE(pcmcia, softingcs_ids);
+
+static struct pcmcia_driver softingcs_driver = {
+	.owner		= THIS_MODULE,
+	.name		= "softingcs",
+	.id_table	= softingcs_ids,
+	.probe		= softingcs_probe,
+	.remove		= __devexit_p(softingcs_remove),
+};
+
+static int __init softingcs_start(void)
+{
+	spin_lock_init(&softingcs_index_lock);
+	return pcmcia_register_driver(&softingcs_driver);
+}
+
+static void __exit softingcs_stop(void)
+{
+	pcmcia_unregister_driver(&softingcs_driver);
+}
+
+module_init(softingcs_start);
+module_exit(softingcs_stop);
+
+MODULE_DESCRIPTION("softing CANcard driver"
+		", links PCMCIA card to softing driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
new file mode 100644
index 0000000..b520784
--- /dev/null
+++ b/drivers/net/can/softing/softing_fw.c
@@ -0,0 +1,691 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/firmware.h>
+#include <linux/sched.h>
+#include <asm/div64.h>
+
+#include "softing.h"
+
+/*
+ * low level DPRAM command.
+ * Make sure that card->dpram[DPRAM_FCT_HOST] is preset
+ */
+static int _softing_fct_cmd(struct softing *card, int16_t cmd, uint16_t vector,
+		const char *msg)
+{
+	int ret;
+	unsigned long stamp;
+
+	iowrite16(cmd, &card->dpram[DPRAM_FCT_PARAM]);
+	iowrite8(vector >> 8, &card->dpram[DPRAM_FCT_HOST + 1]);
+	iowrite8(vector, &card->dpram[DPRAM_FCT_HOST]);
+	/* be sure to flush this to the card */
+	wmb();
+	stamp = jiffies + 1 * HZ;
+	/* wait for card */
+	do {
+		/* DPRAM_FCT_HOST is _not_ aligned */
+		ret = ioread8(&card->dpram[DPRAM_FCT_HOST]) +
+			(ioread8(&card->dpram[DPRAM_FCT_HOST + 1]) << 8);
+		/* don't have any cached variables */
+		rmb();
+		if (ret == RES_OK)
+			/* read return-value now */
+			return ioread16(&card->dpram[DPRAM_FCT_RESULT]);
+
+		if ((ret != vector) || time_after(jiffies, stamp))
+			break;
+		/* process context => relax */
+		usleep_range(500, 10000);
+	} while (1);
+
+	ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
+	dev_alert(&card->pdev->dev, "firmware %s failed (%i)\n", msg, ret);
+	return ret;
+}
+
+static int softing_fct_cmd(struct softing *card, int16_t cmd, const char *msg)
+{
+	int ret;
+
+	ret = _softing_fct_cmd(card, cmd, 0, msg);
+	if (ret > 0) {
+		dev_alert(&card->pdev->dev, "%s returned %u\n", msg, ret);
+		ret = -EIO;
+	}
+	return ret;
+}
+
+int softing_bootloader_command(struct softing *card, int16_t cmd,
+		const char *msg)
+{
+	int ret;
+	unsigned long stamp;
+
+	iowrite16(RES_NONE, &card->dpram[DPRAM_RECEIPT]);
+	iowrite16(cmd, &card->dpram[DPRAM_COMMAND]);
+	/* be sure to flush this to the card */
+	wmb();
+	stamp = jiffies + 3 * HZ;
+	/* wait for card */
+	do {
+		ret = ioread16(&card->dpram[DPRAM_RECEIPT]);
+		/* don't have any cached variables */
+		rmb();
+		if (ret == RES_OK)
+			return 0;
+		if (time_after(jiffies, stamp))
+			break;
+		/* process context => relax */
+		usleep_range(500, 10000);
+	} while (!signal_pending(current));
+
+	ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED;
+	dev_alert(&card->pdev->dev, "bootloader %s failed (%i)\n", msg, ret);
+	return ret;
+}
+
+static int fw_parse(const uint8_t **pmem, uint16_t *ptype, uint32_t *paddr,
+		uint16_t *plen, const uint8_t **pdat)
+{
+	uint16_t checksum[2];
+	const uint8_t *mem;
+	const uint8_t *end;
+
+	/*
+	 * firmware records are a binary, unaligned stream composed of:
+	 * uint16_t type;
+	 * uint32_t addr;
+	 * uint16_t len;
+	 * uint8_t dat[len];
+	 * uint16_t checksum;
+	 * all values in little endian.
+	 * We could define a struct for this, with __attribute__((packed)),
+	 * but would that solve the alignment in _all_ cases (cf. the
+	 * struct itself may be at an odd address)?
+	 *
+	 * I chose to use leXX_to_cpup() since this solves both
+	 * endianness & alignment.
+	 */
+	mem = *pmem;
+	*ptype = le16_to_cpup((void *)&mem[0]);
+	*paddr = le32_to_cpup((void *)&mem[2]);
+	*plen = le16_to_cpup((void *)&mem[6]);
+	*pdat = &mem[8];
+	/* verify checksum */
+	end = &mem[8 + *plen];
+	checksum[0] = le16_to_cpup((void *)end);
+	for (checksum[1] = 0; mem < end; ++mem)
+		checksum[1] += *mem;
+	if (checksum[0] != checksum[1])
+		return -EINVAL;
+	/* increment */
+	*pmem += 10 + *plen;
+	return 0;
+}
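As an aside, the record layout described in the comment inside fw_parse() could be written as a packed struct; this is an illustration only (the driver intentionally avoids it, since the record stream is unaligned and the struct itself could start at an odd address), and the struct name is hypothetical:

/* illustration only, not used by the driver */
struct softing_fw_record {
	__le16 type;		/* 0xffff header, 0 data, 1 eof, 3 entry point */
	__le32 addr;
	__le16 len;
	uint8_t dat[];		/* followed by a __le16 byte-sum over type..dat */
} __attribute__((packed));
/* total record size in the stream: 8 + len + 2 bytes, cf. "*pmem += 10 + *plen" */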
+
+int softing_load_fw(const char *file, struct softing *card,
+		__iomem uint8_t *dpram, unsigned int size, int offset)
+{
+	const struct firmware *fw;
+	int ret;
+	const uint8_t *mem, *end, *dat;
+	uint16_t type, len;
+	uint32_t addr;
+	uint8_t *buf = NULL;
+	int buflen = 0;
+	int8_t type_end = 0;
+
+	ret = request_firmware(&fw, file, &card->pdev->dev);
+	if (ret < 0)
+		return ret;
+	dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes"
+		", offset %c0x%04x\n",
+		card->pdat->name, file, (unsigned int)fw->size,
+		(offset >= 0) ? '+' : '-', (unsigned int)abs(offset));
+	/* parse the firmware */
+	mem = fw->data;
+	end = &mem[fw->size];
+	/* look for header record */
+	ret = fw_parse(&mem, &type, &addr, &len, &dat);
+	if (ret < 0)
+		goto failed;
+	if (type != 0xffff)
+		goto failed;
+	if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) {
+		ret = -EINVAL;
+		goto failed;
+	}
+	/* ok, we had a header */
+	while (mem < end) {
+		ret = fw_parse(&mem, &type, &addr, &len, &dat);
+		if (ret < 0)
+			goto failed;
+		if (type == 3) {
+			/* start address, not used here */
+			continue;
+		} else if (type == 1) {
+			/* eof */
+			type_end = 1;
+			break;
+		} else if (type != 0) {
+			ret = -EINVAL;
+			goto failed;
+		}
+
+		if ((addr + len + offset) > size)
+			goto failed;
+		memcpy_toio(&dpram[addr + offset], dat, len);
+		/* be sure to flush caches from IO space */
+		mb();
+		if (len > buflen) {
+			/* align buflen */
+			buflen = (len + (1024-1)) & ~(1024-1);
+			buf = krealloc(buf, buflen, GFP_KERNEL);
+			if (!buf) {
+				ret = -ENOMEM;
+				goto failed;
+			}
+		}
+		/* verify record data */
+		memcpy_fromio(buf, &dpram[addr + offset], len);
+		if (memcmp(buf, dat, len)) {
+			/* is not ok */
+			dev_alert(&card->pdev->dev, "DPRAM readback failed\n");
+			ret = -EIO;
+			goto failed;
+		}
+	}
+	if (!type_end)
+		/* no end record seen */
+		goto failed;
+	ret = 0;
+failed:
+	kfree(buf);
+	release_firmware(fw);
+	if (ret < 0)
+		dev_info(&card->pdev->dev, "firmware %s failed\n", file);
+	return ret;
+}
+
+int softing_load_app_fw(const char *file, struct softing *card)
+{
+	const struct firmware *fw;
+	const uint8_t *mem, *end, *dat;
+	int ret, j;
+	uint16_t type, len;
+	uint32_t addr, start_addr = 0;
+	unsigned int sum, rx_sum;
+	int8_t type_end = 0, type_entrypoint = 0;
+
+	ret = request_firmware(&fw, file, &card->pdev->dev);
+	if (ret) {
+		dev_alert(&card->pdev->dev, "request_firmware(%s) got %i\n",
+			file, ret);
+		return ret;
+	}
+	dev_dbg(&card->pdev->dev, "firmware(%s) got %lu bytes\n",
+		file, (unsigned long)fw->size);
+	/* parse the firmware */
+	mem = fw->data;
+	end = &mem[fw->size];
+	/* look for header record */
+	ret = fw_parse(&mem, &type, &addr, &len, &dat);
+	if (ret)
+		goto failed;
+	ret = -EINVAL;
+	if (type != 0xffff) {
+		dev_alert(&card->pdev->dev, "firmware starts with type 0x%x\n",
+			type);
+		goto failed;
+	}
+	if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) {
+		dev_alert(&card->pdev->dev, "firmware string '%.*s' fault\n",
+				len, dat);
+		goto failed;
+	}
+	/* ok, we had a header */
+	while (mem < end) {
+		ret = fw_parse(&mem, &type, &addr, &len, &dat);
+		if (ret)
+			goto failed;
+
+		if (type == 3) {
+			/* start address */
+			start_addr = addr;
+			type_entrypoint = 1;
+			continue;
+		} else if (type == 1) {
+			/* eof */
+			type_end = 1;
+			break;
+		} else if (type != 0) {
+			dev_alert(&card->pdev->dev,
+					"unknown record type 0x%04x\n", type);
+			ret = -EINVAL;
+			goto failed;
+		}
+
+		/* regular data */
+		for (sum = 0, j = 0; j < len; ++j)
+			sum += dat[j];
+		/* work in 16bit (target) */
+		sum &= 0xffff;
+
+		memcpy_toio(&card->dpram[card->pdat->app.offs], dat, len);
+		iowrite32(card->pdat->app.offs + card->pdat->app.addr,
+				&card->dpram[DPRAM_COMMAND + 2]);
+		iowrite32(addr, &card->dpram[DPRAM_COMMAND + 6]);
+		iowrite16(len, &card->dpram[DPRAM_COMMAND + 10]);
+		iowrite8(1, &card->dpram[DPRAM_COMMAND + 12]);
+		ret = softing_bootloader_command(card, 1, "loading app.");
+		if (ret < 0)
+			goto failed;
+		/* verify checksum */
+		rx_sum = ioread16(&card->dpram[DPRAM_RECEIPT + 2]);
+		if (rx_sum != sum) {
+			dev_alert(&card->pdev->dev, "SRAM seems to be damaged"
+				", wanted 0x%04x, got 0x%04x\n", sum, rx_sum);
+			ret = -EIO;
+			goto failed;
+		}
+	}
+	if (!type_end || !type_entrypoint)
+		goto failed;
+	/* start application in card */
+	iowrite32(start_addr, &card->dpram[DPRAM_COMMAND + 2]);
+	iowrite8(1, &card->dpram[DPRAM_COMMAND + 6]);
+	ret = softing_bootloader_command(card, 3, "start app.");
+	if (ret < 0)
+		goto failed;
+	ret = 0;
+failed:
+	release_firmware(fw);
+	if (ret < 0)
+		dev_info(&card->pdev->dev, "firmware %s failed\n", file);
+	return ret;
+}
+
+static int softing_reset_chip(struct softing *card)
+{
+	int ret;
+
+	do {
+		/* reset chip */
+		iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO]);
+		iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO+1]);
+		iowrite8(1, &card->dpram[DPRAM_RESET]);
+		iowrite8(0, &card->dpram[DPRAM_RESET+1]);
+
+		ret = softing_fct_cmd(card, 0, "reset_can");
+		if (!ret)
+			break;
+		if (signal_pending(current))
+			/* don't wait any longer */
+			break;
+	} while (1);
+	card->tx.pending = 0;
+	return ret;
+}
+
+int softing_chip_poweron(struct softing *card)
+{
+	int ret;
+	/* sync */
+	ret = _softing_fct_cmd(card, 99, 0x55, "sync-a");
+	if (ret < 0)
+		goto failed;
+
+	ret = _softing_fct_cmd(card, 99, 0xaa, "sync-b");
+	if (ret < 0)
+		goto failed;
+
+	ret = softing_reset_chip(card);
+	if (ret < 0)
+		goto failed;
+	/* get_serial */
+	ret = softing_fct_cmd(card, 43, "get_serial_number");
+	if (ret < 0)
+		goto failed;
+	card->id.serial = ioread32(&card->dpram[DPRAM_FCT_PARAM]);
+	/* get_version */
+	ret = softing_fct_cmd(card, 12, "get_version");
+	if (ret < 0)
+		goto failed;
+	card->id.fw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 2]);
+	card->id.hw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 4]);
+	card->id.license = ioread16(&card->dpram[DPRAM_FCT_PARAM + 6]);
+	card->id.chip[0] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 8]);
+	card->id.chip[1] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 10]);
+	return 0;
+failed:
+	return ret;
+}
+
+static void softing_initialize_timestamp(struct softing *card)
+{
+	uint64_t ovf;
+
+	card->ts_ref = ktime_get();
+
+	/* 16MHz is the reference */
+	ovf = 0x100000000ULL * 16;
+	do_div(ovf, card->pdat->freq ?: 16);
+
+	card->ts_overflow = ktime_add_us(ktime_set(0, 0), ovf);
+}
+
+ktime_t softing_raw2ktime(struct softing *card, u32 raw)
+{
+	uint64_t rawl;
+	ktime_t now, real_offset;
+	ktime_t target;
+	ktime_t tmp;
+
+	now = ktime_get();
+	real_offset = ktime_sub(ktime_get_real(), now);
+
+	/* find nsec from card */
+	rawl = raw * 16;
+	do_div(rawl, card->pdat->freq ?: 16);
+	target = ktime_add_us(card->ts_ref, rawl);
+	/* test for overflows */
+	tmp = ktime_add(target, card->ts_overflow);
+	while (unlikely(ktime_to_ns(tmp) > ktime_to_ns(now))) {
+		card->ts_ref = ktime_add(card->ts_ref, card->ts_overflow);
+		target = tmp;
+		tmp = ktime_add(target, card->ts_overflow);
+	}
+	return ktime_add(target, real_offset);
+}
+
+static inline int softing_error_reporting(struct net_device *netdev)
+{
+	struct softing_priv *priv = netdev_priv(netdev);
+
+	return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+		? 1 : 0;
+}
+
+int softing_startstop(struct net_device *dev, int up)
+{
+	int ret;
+	struct softing *card;
+	struct softing_priv *priv;
+	struct net_device *netdev;
+	int bus_bitmask_start;
+	int j, error_reporting;
+	struct can_frame msg;
+	const struct can_bittiming *bt;
+
+	priv = netdev_priv(dev);
+	card = priv->card;
+
+	if (!card->fw.up)
+		return -EIO;
+
+	ret = mutex_lock_interruptible(&card->fw.lock);
+	if (ret)
+		return ret;
+
+	bus_bitmask_start = 0;
+	if (dev && up)
+		/* prepare to start this bus as well */
+		bus_bitmask_start |= (1 << priv->index);
+	/* bring netdevs down */
+	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+		netdev = card->net[j];
+		if (!netdev)
+			continue;
+		priv = netdev_priv(netdev);
+
+		if (dev != netdev)
+			netif_stop_queue(netdev);
+
+		if (netif_running(netdev)) {
+			if (dev != netdev)
+				bus_bitmask_start |= (1 << j);
+			priv->tx.pending = 0;
+			priv->tx.echo_put = 0;
+			priv->tx.echo_get = 0;
+			/*
+			 * this bus may just have called open_candev(),
+			 * in which case calling close_candev() right away
+			 * looks pointless, but we may also get here from
+			 * busoff recovery, where the echo_skb _needs_
+			 * flushing; just be sure to call open_candev()
+			 * again afterwards
+			 */
+			close_candev(netdev);
+		}
+		priv->can.state = CAN_STATE_STOPPED;
+	}
+	card->tx.pending = 0;
+
+	softing_enable_irq(card, 0);
+	ret = softing_reset_chip(card);
+	if (ret)
+		goto failed;
+	if (!bus_bitmask_start)
+		/* no busses to be brought up */
+		goto card_done;
+
+	if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2)
+			&& (softing_error_reporting(card->net[0])
+				!= softing_error_reporting(card->net[1]))) {
+		dev_alert(&card->pdev->dev,
+				"err_reporting flag differs for busses\n");
+		goto invalid;
+	}
+	error_reporting = 0;
+	if (bus_bitmask_start & 1) {
+		netdev = card->net[0];
+		priv = netdev_priv(netdev);
+		error_reporting += softing_error_reporting(netdev);
+		/* init chip 1 */
+		bt = &priv->can.bittiming;
+		iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
+		iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
+		iowrite16(bt->phase_seg1 + bt->prop_seg,
+				&card->dpram[DPRAM_FCT_PARAM + 6]);
+		iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
+		iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
+				&card->dpram[DPRAM_FCT_PARAM + 10]);
+		ret = softing_fct_cmd(card, 1, "initialize_chip[0]");
+		if (ret < 0)
+			goto failed;
+		/* set mode */
+		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
+		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
+		ret = softing_fct_cmd(card, 3, "set_mode[0]");
+		if (ret < 0)
+			goto failed;
+		/* set filter */
+		/* 11bit id & mask */
+		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
+		iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
+		/* 29bit id.lo & mask.lo & id.hi & mask.hi */
+		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
+		iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
+		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
+		iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
+		ret = softing_fct_cmd(card, 7, "set_filter[0]");
+		if (ret < 0)
+			goto failed;
+		/* set output control */
+		iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
+		ret = softing_fct_cmd(card, 5, "set_output[0]");
+		if (ret < 0)
+			goto failed;
+	}
+	if (bus_bitmask_start & 2) {
+		netdev = card->net[1];
+		priv = netdev_priv(netdev);
+		error_reporting += softing_error_reporting(netdev);
+		/* init chip2 */
+		bt = &priv->can.bittiming;
+		iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
+		iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
+		iowrite16(bt->phase_seg1 + bt->prop_seg,
+				&card->dpram[DPRAM_FCT_PARAM + 6]);
+		iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
+		iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
+				&card->dpram[DPRAM_FCT_PARAM + 10]);
+		ret = softing_fct_cmd(card, 2, "initialize_chip[1]");
+		if (ret < 0)
+			goto failed;
+		/* set mode2 */
+		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
+		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
+		ret = softing_fct_cmd(card, 4, "set_mode[1]");
+		if (ret < 0)
+			goto failed;
+		/* set filter2 */
+		/* 11bit id & mask */
+		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
+		iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
+		/* 29bit id.lo & mask.lo & id.hi & mask.hi */
+		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
+		iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
+		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
+		iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
+		ret = softing_fct_cmd(card, 8, "set_filter[1]");
+		if (ret < 0)
+			goto failed;
+		/* set output control2 */
+		iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
+		ret = softing_fct_cmd(card, 6, "set_output[1]");
+		if (ret < 0)
+			goto failed;
+	}
+	/* enable_error_frame */
+	/*
+	 * Error reporting is switched off at the moment since
+	 * the receiving of them is not yet 100% verified
+	 * This should be enabled sooner or later
+	 *
+	if (error_reporting) {
+		ret = softing_fct_cmd(card, 51, "enable_error_frame");
+		if (ret < 0)
+			goto failed;
+	}
+	*/
+	/* initialize interface */
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 18]);
+	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]);
+	ret = softing_fct_cmd(card, 17, "initialize_interface");
+	if (ret < 0)
+		goto failed;
+	/* enable_fifo */
+	ret = softing_fct_cmd(card, 36, "enable_fifo");
+	if (ret < 0)
+		goto failed;
+	/* enable fifo tx ack */
+	ret = softing_fct_cmd(card, 13, "fifo_tx_ack[0]");
+	if (ret < 0)
+		goto failed;
+	/* enable fifo tx ack2 */
+	ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]");
+	if (ret < 0)
+		goto failed;
+	/* start_chip */
+	ret = softing_fct_cmd(card, 11, "start_chip");
+	if (ret < 0)
+		goto failed;
+	iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]);
+	iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]);
+	if (card->pdat->generation < 2) {
+		iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
+		/* flush the DPRAM caches */
+		wmb();
+	}
+
+	softing_initialize_timestamp(card);
+
+	/*
+	 * do socketcan notifications/status changes
+	 * from here, no errors should occur, or the failed: part
+	 * must be reviewed
+	 */
+	memset(&msg, 0, sizeof(msg));
+	msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
+	msg.can_dlc = CAN_ERR_DLC;
+	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+		if (!(bus_bitmask_start & (1 << j)))
+			continue;
+		netdev = card->net[j];
+		if (!netdev)
+			continue;
+		priv = netdev_priv(netdev);
+		priv->can.state = CAN_STATE_ERROR_ACTIVE;
+		open_candev(netdev);
+		if (dev != netdev) {
+			/* notify other busses on the restart */
+			softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+			++priv->can.can_stats.restarts;
+		}
+		netif_wake_queue(netdev);
+	}
+
+	/* enable interrupts */
+	ret = softing_enable_irq(card, 1);
+	if (ret)
+		goto failed;
+card_done:
+	mutex_unlock(&card->fw.lock);
+	return 0;
+invalid:
+	ret = -EINVAL;
+failed:
+	softing_enable_irq(card, 0);
+	softing_reset_chip(card);
+	mutex_unlock(&card->fw.lock);
+	/* bring all other interfaces down */
+	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+		netdev = card->net[j];
+		if (!netdev)
+			continue;
+		dev_close(netdev);
+	}
+	return ret;
+}
+
+int softing_default_output(struct net_device *netdev)
+{
+	struct softing_priv *priv = netdev_priv(netdev);
+	struct softing *card = priv->card;
+
+	switch (priv->chip) {
+	case 1000:
+		return (card->pdat->generation < 2) ? 0xfb : 0xfa;
+	case 5:
+		return 0x60;
+	default:
+		return 0x40;
+	}
+}
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
new file mode 100644
index 0000000..5157e15
--- /dev/null
+++ b/drivers/net/can/softing/softing_main.c
@@ -0,0 +1,893 @@
+/*
+ * Copyright (C) 2008-2010
+ *
+ * - Kurt Van Dijck, EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include "softing.h"
+
+#define TX_ECHO_SKB_MAX (((TXMAX+1)/2)-1)
+
+/*
+ * test if a specific CAN netdev
+ * is online (i.e. up and running, not sleeping, not busoff)
+ */
+static inline int canif_is_active(struct net_device *netdev)
+{
+	struct can_priv *can = netdev_priv(netdev);
+
+	if (!netif_running(netdev))
+		return 0;
+	return (can->state <= CAN_STATE_ERROR_PASSIVE);
+}
+
+/* reset DPRAM */
+static inline void softing_set_reset_dpram(struct softing *card)
+{
+	if (card->pdat->generation >= 2) {
+		spin_lock_bh(&card->spin);
+		iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) & ~1,
+				&card->dpram[DPRAM_V2_RESET]);
+		spin_unlock_bh(&card->spin);
+	}
+}
+
+static inline void softing_clr_reset_dpram(struct softing *card)
+{
+	if (card->pdat->generation >= 2) {
+		spin_lock_bh(&card->spin);
+		iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) | 1,
+				&card->dpram[DPRAM_V2_RESET]);
+		spin_unlock_bh(&card->spin);
+	}
+}
+
+/* trigger the tx queue-ing */
+static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb,
+		struct net_device *dev)
+{
+	struct softing_priv *priv = netdev_priv(dev);
+	struct softing *card = priv->card;
+	int ret;
+	uint8_t *ptr;
+	uint8_t fifo_wr, fifo_rd;
+	struct can_frame *cf = (struct can_frame *)skb->data;
+	uint8_t buf[DPRAM_TX_SIZE];
+
+	if (can_dropped_invalid_skb(dev, skb))
+		return NETDEV_TX_OK;
+
+	spin_lock(&card->spin);
+
+	ret = NETDEV_TX_BUSY;
+	if (!card->fw.up ||
+			(card->tx.pending >= TXMAX) ||
+			(priv->tx.pending >= TX_ECHO_SKB_MAX))
+		goto xmit_done;
+	fifo_wr = ioread8(&card->dpram[DPRAM_TX_WR]);
+	fifo_rd = ioread8(&card->dpram[DPRAM_TX_RD]);
+	if (fifo_wr == fifo_rd)
+		/* fifo full */
+		goto xmit_done;
+	memset(buf, 0, sizeof(buf));
+	ptr = buf;
+	*ptr = CMD_TX;
+	if (cf->can_id & CAN_RTR_FLAG)
+		*ptr |= CMD_RTR;
+	if (cf->can_id & CAN_EFF_FLAG)
+		*ptr |= CMD_XTD;
+	if (priv->index)
+		*ptr |= CMD_BUS2;
+	++ptr;
+	*ptr++ = cf->can_dlc;
+	*ptr++ = (cf->can_id >> 0);
+	*ptr++ = (cf->can_id >> 8);
+	if (cf->can_id & CAN_EFF_FLAG) {
+		*ptr++ = (cf->can_id >> 16);
+		*ptr++ = (cf->can_id >> 24);
+	} else {
+		/* increment by 1, not 2 as you might expect */
+		ptr += 1;
+	}
+	if (!(cf->can_id & CAN_RTR_FLAG))
+		memcpy(ptr, &cf->data[0], cf->can_dlc);
+	memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr],
+			buf, DPRAM_TX_SIZE);
+	if (++fifo_wr >= DPRAM_TX_CNT)
+		fifo_wr = 0;
+	iowrite8(fifo_wr, &card->dpram[DPRAM_TX_WR]);
+	card->tx.last_bus = priv->index;
+	++card->tx.pending;
+	++priv->tx.pending;
+	can_put_echo_skb(skb, dev, priv->tx.echo_put);
+	++priv->tx.echo_put;
+	if (priv->tx.echo_put >= TX_ECHO_SKB_MAX)
+		priv->tx.echo_put = 0;
+	/* can_put_echo_skb() saves the skb, safe to return TX_OK */
+	ret = NETDEV_TX_OK;
+xmit_done:
+	spin_unlock(&card->spin);
+	if (card->tx.pending >= TXMAX) {
+		int j;
+		for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+			if (card->net[j])
+				netif_stop_queue(card->net[j]);
+		}
+	}
+	if (ret != NETDEV_TX_OK)
+		netif_stop_queue(dev);
+
+	return ret;
+}
+
+/*
+ * shortcut for skb delivery
+ */
+int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
+		ktime_t ktime)
+{
+	struct sk_buff *skb;
+	struct can_frame *cf;
+
+	skb = alloc_can_skb(netdev, &cf);
+	if (!skb)
+		return -ENOMEM;
+	memcpy(cf, msg, sizeof(*msg));
+	skb->tstamp = ktime;
+	return netif_rx(skb);
+}
+
+/*
+ * softing_handle_1
+ * pop 1 entry from the DPRAM queue, and process
+ */
+static int softing_handle_1(struct softing *card)
+{
+	struct net_device *netdev;
+	struct softing_priv *priv;
+	ktime_t ktime;
+	struct can_frame msg;
+	int cnt = 0, lost_msg;
+	uint8_t fifo_rd, fifo_wr, cmd;
+	uint8_t *ptr;
+	uint32_t tmp_u32;
+	uint8_t buf[DPRAM_RX_SIZE];
+
+	memset(&msg, 0, sizeof(msg));
+	/* test for lost msgs */
+	lost_msg = ioread8(&card->dpram[DPRAM_RX_LOST]);
+	if (lost_msg) {
+		int j;
+		/* reset condition */
+		iowrite8(0, &card->dpram[DPRAM_RX_LOST]);
+		/* prepare msg */
+		msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
+		msg.can_dlc = CAN_ERR_DLC;
+		msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+		/*
+		 * service all busses, since we don't know which one
+		 * this applies to, but only service busses that are online
+		 */
+		for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+			netdev = card->net[j];
+			if (!netdev)
+				continue;
+			if (!canif_is_active(netdev))
+				/* a dead bus has no overflows */
+				continue;
+			++netdev->stats.rx_over_errors;
+			softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
+		}
+		/* prepare for other use */
+		memset(&msg, 0, sizeof(msg));
+		++cnt;
+	}
+
+	fifo_rd = ioread8(&card->dpram[DPRAM_RX_RD]);
+	fifo_wr = ioread8(&card->dpram[DPRAM_RX_WR]);
+
+	if (++fifo_rd >= DPRAM_RX_CNT)
+		fifo_rd = 0;
+	if (fifo_wr == fifo_rd)
+		return cnt;
+
+	memcpy_fromio(buf, &card->dpram[DPRAM_RX + DPRAM_RX_SIZE*fifo_rd],
+			DPRAM_RX_SIZE);
+	mb();
+	/* trigger dual port RAM */
+	iowrite8(fifo_rd, &card->dpram[DPRAM_RX_RD]);
+
+	ptr = buf;
+	cmd = *ptr++;
+	if (cmd == 0xff)
+		/* not quite useful, the card has probably gone away */
+		return 0;
+	netdev = card->net[0];
+	if (cmd & CMD_BUS2)
+		netdev = card->net[1];
+	priv = netdev_priv(netdev);
+
+	if (cmd & CMD_ERR) {
+		uint8_t can_state, state;
+
+		state = *ptr++;
+
+		msg.can_id = CAN_ERR_FLAG;
+		msg.can_dlc = CAN_ERR_DLC;
+
+		if (state & SF_MASK_BUSOFF) {
+			can_state = CAN_STATE_BUS_OFF;
+			msg.can_id |= CAN_ERR_BUSOFF;
+			state = STATE_BUSOFF;
+		} else if (state & SF_MASK_EPASSIVE) {
+			can_state = CAN_STATE_ERROR_PASSIVE;
+			msg.can_id |= CAN_ERR_CRTL;
+			msg.data[1] = CAN_ERR_CRTL_TX_PASSIVE;
+			state = STATE_EPASSIVE;
+		} else {
+			can_state = CAN_STATE_ERROR_ACTIVE;
+			msg.can_id |= CAN_ERR_CRTL;
+			state = STATE_EACTIVE;
+		}
+		/* update DPRAM */
+		iowrite8(state, &card->dpram[priv->index ?
+				DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]);
+		/* timestamp */
+		tmp_u32 = le32_to_cpup((void *)ptr);
+		ptr += 4;
+		ktime = softing_raw2ktime(card, tmp_u32);
+
+		++netdev->stats.rx_errors;
+		/* update internal status */
+		if (can_state != priv->can.state) {
+			priv->can.state = can_state;
+			if (can_state == CAN_STATE_ERROR_PASSIVE)
+				++priv->can.can_stats.error_passive;
+			else if (can_state == CAN_STATE_BUS_OFF) {
+				/* this calls can_close_cleanup() */
+				can_bus_off(netdev);
+				netif_stop_queue(netdev);
+			}
+			/* trigger socketcan */
+			softing_netdev_rx(netdev, &msg, ktime);
+		}
+
+	} else {
+		if (cmd & CMD_RTR)
+			msg.can_id |= CAN_RTR_FLAG;
+		msg.can_dlc = get_can_dlc(*ptr++);
+		if (cmd & CMD_XTD) {
+			msg.can_id |= CAN_EFF_FLAG;
+			msg.can_id |= le32_to_cpup((void *)ptr);
+			ptr += 4;
+		} else {
+			msg.can_id |= le16_to_cpup((void *)ptr);
+			ptr += 2;
+		}
+		/* timestamp */
+		tmp_u32 = le32_to_cpup((void *)ptr);
+		ptr += 4;
+		ktime = softing_raw2ktime(card, tmp_u32);
+		if (!(msg.can_id & CAN_RTR_FLAG))
+			memcpy(&msg.data[0], ptr, 8);
+		ptr += 8;
+		/* update socket */
+		if (cmd & CMD_ACK) {
+			/* acknowledge, was tx msg */
+			struct sk_buff *skb;
+			skb = priv->can.echo_skb[priv->tx.echo_get];
+			if (skb)
+				skb->tstamp = ktime;
+			can_get_echo_skb(netdev, priv->tx.echo_get);
+			++priv->tx.echo_get;
+			if (priv->tx.echo_get >= TX_ECHO_SKB_MAX)
+				priv->tx.echo_get = 0;
+			if (priv->tx.pending)
+				--priv->tx.pending;
+			if (card->tx.pending)
+				--card->tx.pending;
+			++netdev->stats.tx_packets;
+			if (!(msg.can_id & CAN_RTR_FLAG))
+				netdev->stats.tx_bytes += msg.can_dlc;
+		} else {
+			int ret;
+
+			ret = softing_netdev_rx(netdev, &msg, ktime);
+			if (ret == NET_RX_SUCCESS) {
+				++netdev->stats.rx_packets;
+				if (!(msg.can_id & CAN_RTR_FLAG))
+					netdev->stats.rx_bytes += msg.can_dlc;
+			} else {
+				++netdev->stats.rx_dropped;
+			}
+		}
+	}
+	++cnt;
+	return cnt;
+}
+
+/*
+ * real interrupt handler
+ */
+static irqreturn_t softing_irq_thread(int irq, void *dev_id)
+{
+	struct softing *card = (struct softing *)dev_id;
+	struct net_device *netdev;
+	struct softing_priv *priv;
+	int j, offset, work_done;
+
+	work_done = 0;
+	spin_lock_bh(&card->spin);
+	while (softing_handle_1(card) > 0) {
+		++card->irq.svc_count;
+		++work_done;
+	}
+	spin_unlock_bh(&card->spin);
+	/* resume tx queues */
+	offset = card->tx.last_bus;
+	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+		if (card->tx.pending >= TXMAX)
+			break;
+		netdev = card->net[(j + offset + 1) % card->pdat->nbus];
+		if (!netdev)
+			continue;
+		priv = netdev_priv(netdev);
+		if (!canif_is_active(netdev))
+			/* it makes no sense to wake dead busses */
+			continue;
+		if (priv->tx.pending >= TX_ECHO_SKB_MAX)
+			continue;
+		++work_done;
+		netif_wake_queue(netdev);
+	}
+	return work_done ? IRQ_HANDLED : IRQ_NONE;
+}
+
+/*
+ * interrupt routines:
+ * schedule the 'real interrupt handler'
+ */
+static irqreturn_t softing_irq_v2(int irq, void *dev_id)
+{
+	struct softing *card = (struct softing *)dev_id;
+	uint8_t ir;
+
+	ir = ioread8(&card->dpram[DPRAM_V2_IRQ_TOHOST]);
+	iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
+	return (1 == ir) ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+static irqreturn_t softing_irq_v1(int irq, void *dev_id)
+{
+	struct softing *card = (struct softing *)dev_id;
+	uint8_t ir;
+
+	ir = ioread8(&card->dpram[DPRAM_IRQ_TOHOST]);
+	iowrite8(0, &card->dpram[DPRAM_IRQ_TOHOST]);
+	return ir ? IRQ_WAKE_THREAD : IRQ_NONE;
+}
+
+/*
+ * netdev/candev inter-operability
+ */
+static int softing_netdev_open(struct net_device *ndev)
+{
+	int ret;
+
+	/* check or determine and set bittime */
+	ret = open_candev(ndev);
+	if (!ret)
+		ret = softing_startstop(ndev, 1);
+	return ret;
+}
+
+static int softing_netdev_stop(struct net_device *ndev)
+{
+	int ret;
+
+	netif_stop_queue(ndev);
+
+	/* softing cycle does close_candev() */
+	ret = softing_startstop(ndev, 0);
+	return ret;
+}
+
+static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+	int ret;
+
+	switch (mode) {
+	case CAN_MODE_START:
+		/* softing_startstop does close_candev() */
+		ret = softing_startstop(ndev, 1);
+		return ret;
+	case CAN_MODE_STOP:
+	case CAN_MODE_SLEEP:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+/*
+ * Softing device management helpers
+ */
+int softing_enable_irq(struct softing *card, int enable)
+{
+	int ret;
+
+	if (!card->irq.nr) {
+		return 0;
+	} else if (card->irq.requested && !enable) {
+		free_irq(card->irq.nr, card);
+		card->irq.requested = 0;
+	} else if (!card->irq.requested && enable) {
+		ret = request_threaded_irq(card->irq.nr,
+				(card->pdat->generation >= 2) ?
+					softing_irq_v2 : softing_irq_v1,
+				softing_irq_thread, IRQF_SHARED,
+				dev_name(&card->pdev->dev), card);
+		if (ret) {
+			dev_alert(&card->pdev->dev,
+					"request_threaded_irq(%u) failed\n",
+					card->irq.nr);
+			return ret;
+		}
+		card->irq.requested = 1;
+	}
+	return 0;
+}
+
+static void softing_card_shutdown(struct softing *card)
+{
+	int fw_up = 0;
+
+	if (mutex_lock_interruptible(&card->fw.lock))
+		/* return -ERESTARTSYS */;
+	fw_up = card->fw.up;
+	card->fw.up = 0;
+
+	if (card->irq.requested && card->irq.nr) {
+		free_irq(card->irq.nr, card);
+		card->irq.requested = 0;
+	}
+	if (fw_up) {
+		if (card->pdat->enable_irq)
+			card->pdat->enable_irq(card->pdev, 0);
+		softing_set_reset_dpram(card);
+		if (card->pdat->reset)
+			card->pdat->reset(card->pdev, 1);
+	}
+	mutex_unlock(&card->fw.lock);
+}
+
+static __devinit int softing_card_boot(struct softing *card)
+{
+	int ret, j;
+	static const uint8_t stream[] = {
+		0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, };
+	unsigned char back[sizeof(stream)];
+
+	if (mutex_lock_interruptible(&card->fw.lock))
+		return -ERESTARTSYS;
+	if (card->fw.up) {
+		mutex_unlock(&card->fw.lock);
+		return 0;
+	}
+	/* reset board */
+	if (card->pdat->enable_irq)
+		card->pdat->enable_irq(card->pdev, 1);
+	/* boot card */
+	softing_set_reset_dpram(card);
+	if (card->pdat->reset)
+		card->pdat->reset(card->pdev, 1);
+	for (j = 0; (j + sizeof(stream)) < card->dpram_size;
+			j += sizeof(stream)) {
+
+		memcpy_toio(&card->dpram[j], stream, sizeof(stream));
+		/* flush IO cache */
+		mb();
+		memcpy_fromio(back, &card->dpram[j], sizeof(stream));
+
+		if (!memcmp(back, stream, sizeof(stream)))
+			continue;
+		/* memory is not equal */
+		dev_alert(&card->pdev->dev, "dpram failed at 0x%04x\n", j);
+		ret = -EIO;
+		goto failed;
+	}
+	wmb();
+	/* load boot firmware */
+	ret = softing_load_fw(card->pdat->boot.fw, card, card->dpram,
+				card->dpram_size,
+				card->pdat->boot.offs - card->pdat->boot.addr);
+	if (ret < 0)
+		goto failed;
+	/* load loader firmware */
+	ret = softing_load_fw(card->pdat->load.fw, card, card->dpram,
+				card->dpram_size,
+				card->pdat->load.offs - card->pdat->load.addr);
+	if (ret < 0)
+		goto failed;
+
+	if (card->pdat->reset)
+		card->pdat->reset(card->pdev, 0);
+	softing_clr_reset_dpram(card);
+	ret = softing_bootloader_command(card, 0, "card boot");
+	if (ret < 0)
+		goto failed;
+	ret = softing_load_app_fw(card->pdat->app.fw, card);
+	if (ret < 0)
+		goto failed;
+
+	ret = softing_chip_poweron(card);
+	if (ret < 0)
+		goto failed;
+
+	card->fw.up = 1;
+	mutex_unlock(&card->fw.lock);
+	return 0;
+failed:
+	card->fw.up = 0;
+	if (card->pdat->enable_irq)
+		card->pdat->enable_irq(card->pdev, 0);
+	softing_set_reset_dpram(card);
+	if (card->pdat->reset)
+		card->pdat->reset(card->pdev, 1);
+	mutex_unlock(&card->fw.lock);
+	return ret;
+}
+
+/*
+ * netdev sysfs
+ */
+static ssize_t show_channel(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct net_device *ndev = to_net_dev(dev);
+	struct softing_priv *priv = netdev2softing(ndev);
+
+	return sprintf(buf, "%i\n", priv->index);
+}
+
+static ssize_t show_chip(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct net_device *ndev = to_net_dev(dev);
+	struct softing_priv *priv = netdev2softing(ndev);
+
+	return sprintf(buf, "%i\n", priv->chip);
+}
+
+static ssize_t show_output(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct net_device *ndev = to_net_dev(dev);
+	struct softing_priv *priv = netdev2softing(ndev);
+
+	return sprintf(buf, "0x%02x\n", priv->output);
+}
+
+static ssize_t store_output(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct net_device *ndev = to_net_dev(dev);
+	struct softing_priv *priv = netdev2softing(ndev);
+	struct softing *card = priv->card;
+	unsigned long val;
+	int ret;
+
+	ret = strict_strtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	val &= 0xFF;
+
+	ret = mutex_lock_interruptible(&card->fw.lock);
+	if (ret)
+		return -ERESTARTSYS;
+	if (netif_running(ndev)) {
+		mutex_unlock(&card->fw.lock);
+		return -EBUSY;
+	}
+	priv->output = val;
+	mutex_unlock(&card->fw.lock);
+	return count;
+}
+
+static const DEVICE_ATTR(channel, S_IRUGO, show_channel, NULL);
+static const DEVICE_ATTR(chip, S_IRUGO, show_chip, NULL);
+static const DEVICE_ATTR(output, S_IRUGO | S_IWUSR, show_output, store_output);
+
+static const struct attribute *const netdev_sysfs_attrs[] = {
+	&dev_attr_channel.attr,
+	&dev_attr_chip.attr,
+	&dev_attr_output.attr,
+	NULL,
+};
+static const struct attribute_group netdev_sysfs_group = {
+	.name = NULL,
+	.attrs = (struct attribute **)netdev_sysfs_attrs,
+};
+
+static const struct net_device_ops softing_netdev_ops = {
+	.ndo_open = softing_netdev_open,
+	.ndo_stop = softing_netdev_stop,
+	.ndo_start_xmit	= softing_netdev_start_xmit,
+};
+
+static const struct can_bittiming_const softing_btr_const = {
+	.tseg1_min = 1,
+	.tseg1_max = 16,
+	.tseg2_min = 1,
+	.tseg2_max = 8,
+	.sjw_max = 4, /* overruled */
+	.brp_min = 1,
+	.brp_max = 32, /* overruled */
+	.brp_inc = 1,
+};
+
+
+static __devinit struct net_device *softing_netdev_create(struct softing *card,
+		uint16_t chip_id)
+{
+	struct net_device *netdev;
+	struct softing_priv *priv;
+
+	netdev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
+	if (!netdev) {
+		dev_alert(&card->pdev->dev, "alloc_candev failed\n");
+		return NULL;
+	}
+	priv = netdev_priv(netdev);
+	priv->netdev = netdev;
+	priv->card = card;
+	memcpy(&priv->btr_const, &softing_btr_const, sizeof(priv->btr_const));
+	priv->btr_const.brp_max = card->pdat->max_brp;
+	priv->btr_const.sjw_max = card->pdat->max_sjw;
+	priv->can.bittiming_const = &priv->btr_const;
+	priv->can.clock.freq = 8000000;
+	priv->chip = chip_id;
+	priv->output = softing_default_output(netdev);
+	SET_NETDEV_DEV(netdev, &card->pdev->dev);
+
+	netdev->flags |= IFF_ECHO;
+	netdev->netdev_ops = &softing_netdev_ops;
+	priv->can.do_set_mode = softing_candev_set_mode;
+	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+
+	return netdev;
+}
+
+static __devinit int softing_netdev_register(struct net_device *netdev)
+{
+	int ret;
+
+	netdev->sysfs_groups[0] = &netdev_sysfs_group;
+	ret = register_candev(netdev);
+	if (ret) {
+		dev_alert(&netdev->dev, "register failed\n");
+		return ret;
+	}
+	return 0;
+}
+
+static void softing_netdev_cleanup(struct net_device *netdev)
+{
+	unregister_candev(netdev);
+	free_candev(netdev);
+}
+
+/*
+ * sysfs for Platform device
+ */
+#define DEV_ATTR_RO(name, member) \
+static ssize_t show_##name(struct device *dev, \
+		struct device_attribute *attr, char *buf) \
+{ \
+	struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
+	return sprintf(buf, "%u\n", card->member); \
+} \
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
+
+#define DEV_ATTR_RO_STR(name, member) \
+static ssize_t show_##name(struct device *dev, \
+		struct device_attribute *attr, char *buf) \
+{ \
+	struct softing *card = platform_get_drvdata(to_platform_device(dev)); \
+	return sprintf(buf, "%s\n", card->member); \
+} \
+static DEVICE_ATTR(name, 0444, show_##name, NULL)
+
+DEV_ATTR_RO(serial, id.serial);
+DEV_ATTR_RO_STR(firmware, pdat->app.fw);
+DEV_ATTR_RO(firmware_version, id.fw_version);
+DEV_ATTR_RO_STR(hardware, pdat->name);
+DEV_ATTR_RO(hardware_version, id.hw_version);
+DEV_ATTR_RO(license, id.license);
+DEV_ATTR_RO(frequency, id.freq);
+DEV_ATTR_RO(txpending, tx.pending);
+
+static struct attribute *softing_pdev_attrs[] = {
+	&dev_attr_serial.attr,
+	&dev_attr_firmware.attr,
+	&dev_attr_firmware_version.attr,
+	&dev_attr_hardware.attr,
+	&dev_attr_hardware_version.attr,
+	&dev_attr_license.attr,
+	&dev_attr_frequency.attr,
+	&dev_attr_txpending.attr,
+	NULL,
+};
+
+static const struct attribute_group softing_pdev_group = {
+	.name = NULL,
+	.attrs = softing_pdev_attrs,
+};
+
+/*
+ * platform driver
+ */
+static __devexit int softing_pdev_remove(struct platform_device *pdev)
+{
+	struct softing *card = platform_get_drvdata(pdev);
+	int j;
+
+	/* first, disable card */
+	softing_card_shutdown(card);
+
+	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+		if (!card->net[j])
+			continue;
+		softing_netdev_cleanup(card->net[j]);
+		card->net[j] = NULL;
+	}
+	sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
+
+	iounmap(card->dpram);
+	kfree(card);
+	return 0;
+}
+
+static __devinit int softing_pdev_probe(struct platform_device *pdev)
+{
+	const struct softing_platform_data *pdat = pdev->dev.platform_data;
+	struct softing *card;
+	struct net_device *netdev;
+	struct softing_priv *priv;
+	struct resource *pres;
+	int ret;
+	int j;
+
+	if (!pdat) {
+		dev_warn(&pdev->dev, "no platform data\n");
+		return -EINVAL;
+	}
+	if (pdat->nbus > ARRAY_SIZE(card->net)) {
+		dev_warn(&pdev->dev, "%u nets??\n", pdat->nbus);
+		return -EINVAL;
+	}
+
+	card = kzalloc(sizeof(*card), GFP_KERNEL);
+	if (!card)
+		return -ENOMEM;
+	card->pdat = pdat;
+	card->pdev = pdev;
+	platform_set_drvdata(pdev, card);
+	mutex_init(&card->fw.lock);
+	spin_lock_init(&card->spin);
+
+	ret = -EINVAL;
+	pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!pres)
+		goto platform_resource_failed;
+	card->dpram_phys = pres->start;
+	card->dpram_size = pres->end - pres->start + 1;
+	card->dpram = ioremap_nocache(card->dpram_phys, card->dpram_size);
+	if (!card->dpram) {
+		dev_alert(&card->pdev->dev, "dpram ioremap failed\n");
+		goto ioremap_failed;
+	}
+
+	pres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (pres)
+		card->irq.nr = pres->start;
+
+	/* reset card */
+	ret = softing_card_boot(card);
+	if (ret < 0) {
+		dev_alert(&pdev->dev, "failed to boot\n");
+		goto boot_failed;
+	}
+
+	/* only now, the chips are known */
+	card->id.freq = card->pdat->freq;
+
+	ret = sysfs_create_group(&pdev->dev.kobj, &softing_pdev_group);
+	if (ret < 0) {
+		dev_alert(&card->pdev->dev, "sysfs failed\n");
+		goto sysfs_failed;
+	}
+
+	ret = -ENOMEM;
+	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+		card->net[j] = netdev =
+			softing_netdev_create(card, card->id.chip[j]);
+		if (!netdev) {
+			dev_alert(&pdev->dev, "failed to make can[%i]", j);
+			goto netdev_failed;
+		}
+		priv = netdev_priv(card->net[j]);
+		priv->index = j;
+		ret = softing_netdev_register(netdev);
+		if (ret) {
+			free_candev(netdev);
+			card->net[j] = NULL;
+			dev_alert(&card->pdev->dev,
+					"failed to register can[%i]\n", j);
+			goto netdev_failed;
+		}
+	}
+	dev_info(&card->pdev->dev, "%s ready.\n", card->pdat->name);
+	return 0;
+
+netdev_failed:
+	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
+		if (!card->net[j])
+			continue;
+		softing_netdev_cleanup(card->net[j]);
+	}
+	sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
+sysfs_failed:
+	softing_card_shutdown(card);
+boot_failed:
+	iounmap(card->dpram);
+ioremap_failed:
+platform_resource_failed:
+	kfree(card);
+	return ret;
+}
+
+static struct platform_driver softing_driver = {
+	.driver = {
+		.name = "softing",
+		.owner = THIS_MODULE,
+	},
+	.probe = softing_pdev_probe,
+	.remove = __devexit_p(softing_pdev_remove),
+};
+
+MODULE_ALIAS("platform:softing");
+
+static int __init softing_start(void)
+{
+	return platform_driver_register(&softing_driver);
+}
+
+static void __exit softing_stop(void)
+{
+	platform_driver_unregister(&softing_driver);
+}
+
+module_init(softing_start);
+module_exit(softing_stop);
+
+MODULE_DESCRIPTION("Softing DPRAM CAN driver");
+MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/softing/softing_platform.h b/drivers/net/can/softing/softing_platform.h
new file mode 100644
index 0000000..ebbf698
--- /dev/null
+++ b/drivers/net/can/softing/softing_platform.h
@@ -0,0 +1,40 @@
+
+#include <linux/platform_device.h>
+
+#ifndef _SOFTING_DEVICE_H_
+#define _SOFTING_DEVICE_H_
+
+/* softing firmware directory prefix */
+#define fw_dir "softing-4.6/"
+
+struct softing_platform_data {
+	unsigned int manf;
+	unsigned int prod;
+	/*
+	 * generation:
+	 * 1st: with NEC or SJA1000,
+	 *      8bit, exclusive interrupt, ...
+	 * 2nd: only SJA1000,
+	 *      16bit, shared interrupt
+	 */
+	int generation;
+	int nbus; /* # busses on device */
+	unsigned int freq; /* operating frequency in Hz */
+	unsigned int max_brp;
+	unsigned int max_sjw;
+	unsigned long dpram_size;
+	const char *name;
+	struct {
+		unsigned long offs;
+		unsigned long addr;
+		const char *fw;
+	} boot, load, app;
+	/*
+	 * reset() function
+	 * bring pdev in or out of reset, depending on value
+	 */
+	int (*reset)(struct platform_device *pdev, int value);
+	int (*enable_irq)(struct platform_device *pdev, int value);
+};
+
+#endif
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 263a294..2d2d28f 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -65,7 +65,14 @@
 static DEFINE_RWLOCK(cnic_dev_lock);
 static DEFINE_MUTEX(cnic_lock);
 
-static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+
+/* helper function, assuming cnic_lock is held */
+static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
+{
+	return rcu_dereference_protected(cnic_ulp_tbl[type],
+					 lockdep_is_held(&cnic_lock));
+}
 
 static int cnic_service_bnx2(void *, void *);
 static int cnic_service_bnx2x(void *, void *);
@@ -435,7 +442,7 @@
 		return -EINVAL;
 	}
 	mutex_lock(&cnic_lock);
-	if (cnic_ulp_tbl[ulp_type]) {
+	if (cnic_ulp_tbl_prot(ulp_type)) {
 		pr_err("%s: Type %d has already been registered\n",
 		       __func__, ulp_type);
 		mutex_unlock(&cnic_lock);
@@ -478,7 +485,7 @@
 		return -EINVAL;
 	}
 	mutex_lock(&cnic_lock);
-	ulp_ops = cnic_ulp_tbl[ulp_type];
+	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
 	if (!ulp_ops) {
 		pr_err("%s: Type %d has not been registered\n",
 		       __func__, ulp_type);
@@ -529,7 +536,7 @@
 		return -EINVAL;
 	}
 	mutex_lock(&cnic_lock);
-	if (cnic_ulp_tbl[ulp_type] == NULL) {
+	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
 		pr_err("%s: Driver with type %d has not been registered\n",
 		       __func__, ulp_type);
 		mutex_unlock(&cnic_lock);
@@ -544,7 +551,7 @@
 
 	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
 	cp->ulp_handle[ulp_type] = ulp_ctx;
-	ulp_ops = cnic_ulp_tbl[ulp_type];
+	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
 	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
 	cnic_hold(dev);
 
@@ -699,13 +706,13 @@
 static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 {
 	int i;
-	u32 *page_table = dma->pgtbl;
+	__le32 *page_table = (__le32 *) dma->pgtbl;
 
 	for (i = 0; i < dma->num_pages; i++) {
 		/* Each entry needs to be in big endian format. */
-		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 		page_table++;
-		*page_table = (u32) dma->pg_map_arr[i];
+		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 		page_table++;
 	}
 }
@@ -713,13 +720,13 @@
 static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
 {
 	int i;
-	u32 *page_table = dma->pgtbl;
+	__le32 *page_table = (__le32 *) dma->pgtbl;
 
 	for (i = 0; i < dma->num_pages; i++) {
 		/* Each entry needs to be in little endian format. */
-		*page_table = dma->pg_map_arr[i] & 0xffffffff;
+		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 		page_table++;
-		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 		page_table++;
 	}
 }
@@ -2953,7 +2960,8 @@
 		struct cnic_ulp_ops *ulp_ops;
 
 		mutex_lock(&cnic_lock);
-		ulp_ops = cp->ulp_ops[if_type];
+		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+						    lockdep_is_held(&cnic_lock));
 		if (!ulp_ops) {
 			mutex_unlock(&cnic_lock);
 			continue;
@@ -2977,7 +2985,8 @@
 		struct cnic_ulp_ops *ulp_ops;
 
 		mutex_lock(&cnic_lock);
-		ulp_ops = cp->ulp_ops[if_type];
+		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+						    lockdep_is_held(&cnic_lock));
 		if (!ulp_ops || !ulp_ops->cnic_start) {
 			mutex_unlock(&cnic_lock);
 			continue;
@@ -3041,7 +3050,7 @@
 		struct cnic_ulp_ops *ulp_ops;
 
 		mutex_lock(&cnic_lock);
-		ulp_ops = cnic_ulp_tbl[i];
+		ulp_ops = cnic_ulp_tbl_prot(i);
 		if (!ulp_ops || !ulp_ops->cnic_init) {
 			mutex_unlock(&cnic_lock);
 			continue;
@@ -3065,7 +3074,7 @@
 		struct cnic_ulp_ops *ulp_ops;
 
 		mutex_lock(&cnic_lock);
-		ulp_ops = cnic_ulp_tbl[i];
+		ulp_ops = cnic_ulp_tbl_prot(i);
 		if (!ulp_ops || !ulp_ops->cnic_exit) {
 			mutex_unlock(&cnic_lock);
 			continue;
@@ -4170,6 +4179,14 @@
 		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
 }
 
+static void cnic_get_bnx2_iscsi_info(struct cnic_dev *dev)
+{
+	u32 max_conn;
+
+	max_conn = cnic_reg_rd_ind(dev, BNX2_FW_MAX_ISCSI_CONN);
+	dev->max_iscsi_conn = max_conn;
+}
+
 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -4494,6 +4511,8 @@
 		return err;
 	}
 
+	cnic_get_bnx2_iscsi_info(dev);
+
 	return 0;
 }
 
@@ -4705,129 +4724,6 @@
 	cp->rx_cons = *cp->rx_cons_ptr;
 }
 
-static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
-				     u32 lower_addr)
-{
-	u32 val;
-	u8 mac[6];
-
-	val = CNIC_RD(dev, upper_addr);
-
-	mac[0] = (u8) (val >> 8);
-	mac[1] = (u8) val;
-
-	val = CNIC_RD(dev, lower_addr);
-
-	mac[2] = (u8) (val >> 24);
-	mac[3] = (u8) (val >> 16);
-	mac[4] = (u8) (val >> 8);
-	mac[5] = (u8) val;
-
-	if (is_valid_ether_addr(mac)) {
-		memcpy(dev->mac_addr, mac, 6);
-		return 0;
-	} else {
-		return -EINVAL;
-	}
-}
-
-static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	u32 base, base2, addr, addr1, val;
-	int port = CNIC_PORT(cp);
-
-	dev->max_iscsi_conn = 0;
-	base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
-	if (base == 0)
-		return;
-
-	base2 = CNIC_RD(dev, (CNIC_PATH(cp) ? MISC_REG_GENERIC_CR_1 :
-					      MISC_REG_GENERIC_CR_0));
-	addr = BNX2X_SHMEM_ADDR(base,
-		dev_info.port_hw_config[port].iscsi_mac_upper);
-
-	addr1 = BNX2X_SHMEM_ADDR(base,
-		dev_info.port_hw_config[port].iscsi_mac_lower);
-
-	cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);
-
-	addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
-	val = CNIC_RD(dev, addr);
-
-	if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
-		u16 val16;
-
-		addr = BNX2X_SHMEM_ADDR(base,
-				drv_lic_key[port].max_iscsi_init_conn);
-		val16 = CNIC_RD16(dev, addr);
-
-		if (val16)
-			val16 ^= 0x1e1e;
-		dev->max_iscsi_conn = val16;
-	}
-
-	if (BNX2X_CHIP_IS_E2(cp->chip_id))
-		dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
-
-	if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
-		int func = CNIC_FUNC(cp);
-		u32 mf_cfg_addr;
-
-		if (BNX2X_SHMEM2_HAS(base2, mf_cfg_addr))
-			mf_cfg_addr = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base2,
-					      mf_cfg_addr));
-		else
-			mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
-
-		if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
-			/* Must determine if the MF is SD vs SI mode */
-			addr = BNX2X_SHMEM_ADDR(base,
-					dev_info.shared_feature_config.config);
-			val = CNIC_RD(dev, addr);
-			if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
-			    SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
-				int rc;
-
-				/* MULTI_FUNCTION_SI mode */
-				addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
-					func_ext_config[func].func_cfg);
-				val = CNIC_RD(dev, addr);
-				if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
-					dev->max_iscsi_conn = 0;
-
-				if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
-					dev->max_fcoe_conn = 0;
-
-				addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
-					func_ext_config[func].
-					iscsi_mac_addr_upper);
-				addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
-					func_ext_config[func].
-					iscsi_mac_addr_lower);
-				rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
-								addr1);
-				if (rc && func > 1)
-					dev->max_iscsi_conn = 0;
-
-				return;
-			}
-		}
-
-		addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
-			func_mf_config[func].e1hov_tag);
-
-		val = CNIC_RD(dev, addr);
-		val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
-		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
-			dev->max_fcoe_conn = 0;
-			dev->max_iscsi_conn = 0;
-		}
-	}
-	if (!is_valid_ether_addr(dev->mac_addr))
-		dev->max_iscsi_conn = 0;
-}
-
 static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -4909,8 +4805,6 @@
 
 	cnic_init_bnx2x_kcq(dev);
 
-	cnic_get_bnx2x_iscsi_info(dev);
-
 	/* Only 1 EQ */
 	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
@@ -5343,6 +5237,14 @@
 	cdev->pcidev = pdev;
 	cp->chip_id = ethdev->chip_id;
 
+	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
+		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
+	if (BNX2X_CHIP_IS_E2(cp->chip_id) &&
+	    !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
+
+	memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+
 	cp->cnic_ops = &cnic_bnx2x_ops;
 	cp->start_hw = cnic_start_bnx2x_hw;
 	cp->stop_hw = cnic_stop_bnx2x_hw;
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index b328f6c..4456260 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -220,7 +220,7 @@
 #define ULP_F_INIT	0
 #define ULP_F_START	1
 #define ULP_F_CALL_PENDING	2
-	struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
+	struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
 
 	unsigned long cnic_local_flags;
 #define	CNIC_LCL_FL_KWQ_INIT		0x0
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 9f44e0f..e01b49e 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
 #ifndef CNIC_IF_H
 #define CNIC_IF_H
 
-#define CNIC_MODULE_VERSION	"2.2.12"
-#define CNIC_MODULE_RELDATE	"Jan 03, 2011"
+#define CNIC_MODULE_VERSION	"2.2.13"
+#define CNIC_MODULE_RELDATE	"Jan 31, 2011"
 
 #define CNIC_ULP_RDMA		0
 #define CNIC_ULP_ISCSI		1
@@ -159,6 +159,9 @@
 	u32		drv_state;
 #define CNIC_DRV_STATE_REGD		0x00000001
 #define CNIC_DRV_STATE_USING_MSIX	0x00000002
+#define CNIC_DRV_STATE_NO_ISCSI_OOO	0x00000004
+#define CNIC_DRV_STATE_NO_ISCSI		0x00000008
+#define CNIC_DRV_STATE_NO_FCOE		0x00000010
 	u32		chip_id;
 	u32		max_kwqe_pending;
 	struct pci_dev	*pdev;
@@ -176,6 +179,7 @@
 	u32		fcoe_init_cid;
 	u16		iscsi_l2_client_id;
 	u16		iscsi_l2_cid;
+	u8		iscsi_mac[ETH_ALEN];
 
 	int		num_irq;
 	struct cnic_irq	irq_arr[MAX_CNIC_VEC];
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index ef02aa6..862804f 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -186,9 +186,10 @@
 				dev = NULL;
 				if (grp)
 					dev = vlan_group_get_device(grp, vlan);
-			} else
+			} else if (netif_is_bond_slave(dev)) {
 				while (dev->master)
 					dev = dev->master;
+			}
 			return dev;
 		}
 	}
@@ -967,8 +968,6 @@
 		cxgb_neigh_update((struct neighbour *)ctx);
 		break;
 	}
-	case (NETEVENT_PMTU_UPDATE):
-		break;
 	case (NETEVENT_REDIRECT):{
 		struct netevent_redirect *nr = ctx;
 		cxgb_redirect(nr->old, nr->new);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 059c1ee..5352c8a 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -2471,7 +2471,6 @@
 	case NETEVENT_NEIGH_UPDATE:
 		check_neigh_update(data);
 		break;
-	case NETEVENT_PMTU_UPDATE:
 	case NETEVENT_REDIRECT:
 	default:
 		break;
@@ -2710,6 +2709,8 @@
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
 
+	netif_carrier_off(dev);
+
 	if (!(adapter->flags & FULL_INIT_DONE)) {
 		err = cxgb_up(adapter);
 		if (err < 0)
@@ -3661,7 +3662,6 @@
 		pi->xact_addr_filt = -1;
 		pi->rx_offload = RX_CSO;
 		pi->port_id = i;
-		netif_carrier_off(netdev);
 		netdev->irq = pdev->irq;
 
 		netdev->features |= NETIF_F_SG | TSO_FLAGS;
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 56166ae..6aad64d 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -2040,7 +2040,7 @@
 {
 	int i;
 
-	BUG_ON(adapter->debugfs_root == NULL);
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
 	/*
 	 * Debugfs support is best effort.
@@ -2061,7 +2061,7 @@
  */
 static void cleanup_debugfs(struct adapter *adapter)
 {
-	BUG_ON(adapter->debugfs_root == NULL);
+	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
 
 	/*
 	 * Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2489,17 +2489,6 @@
 	struct net_device *netdev;
 
 	/*
-	 * Vet our module parameters.
-	 */
-	if (msi != MSI_MSIX && msi != MSI_MSI) {
-		dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
-			" (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
-			MSI_MSI);
-		err = -EINVAL;
-		goto err_out;
-	}
-
-	/*
 	 * Print our driver banner the first time we're called to initialize a
 	 * device.
 	 */
@@ -2711,11 +2700,11 @@
 	/*
 	 * Set up our debugfs entries.
 	 */
-	if (cxgb4vf_debugfs_root) {
+	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
 		adapter->debugfs_root =
 			debugfs_create_dir(pci_name(pdev),
 					   cxgb4vf_debugfs_root);
-		if (adapter->debugfs_root == NULL)
+		if (IS_ERR_OR_NULL(adapter->debugfs_root))
 			dev_warn(&pdev->dev, "could not create debugfs"
 				 " directory");
 		else
@@ -2770,7 +2759,7 @@
 	 */
 
 err_free_debugfs:
-	if (adapter->debugfs_root) {
+	if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
 		cleanup_debugfs(adapter);
 		debugfs_remove_recursive(adapter->debugfs_root);
 	}
@@ -2802,7 +2791,6 @@
 err_disable_device:
 	pci_disable_device(pdev);
 
-err_out:
 	return err;
 }
 
@@ -2840,7 +2828,7 @@
 		/*
 		 * Tear down our debugfs entries.
 		 */
-		if (adapter->debugfs_root) {
+		if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
 			cleanup_debugfs(adapter);
 			debugfs_remove_recursive(adapter->debugfs_root);
 		}
@@ -2874,6 +2862,46 @@
 }
 
 /*
+ * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
+ * delivery.
+ */
+static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
+{
+	struct adapter *adapter;
+	int pidx;
+
+	adapter = pci_get_drvdata(pdev);
+	if (!adapter)
+		return;
+
+	/*
+	 * Disable all Virtual Interfaces.  This will shut down the
+	 * delivery of all ingress packets into the chip for these
+	 * Virtual Interfaces.
+	 */
+	for_each_port(adapter, pidx) {
+		struct net_device *netdev;
+		struct port_info *pi;
+
+		if (!test_bit(pidx, &adapter->registered_device_map))
+			continue;
+
+		netdev = adapter->port[pidx];
+		if (!netdev)
+			continue;
+
+		pi = netdev_priv(netdev);
+		t4vf_enable_vi(adapter, pi->viid, false, false);
+	}
+
+	/*
+	 * Free up all Queues, which will prevent further DMA and
+	 * Interrupts, allowing various internal pathways to drain.
+	 */
+	t4vf_free_sge_resources(adapter);
+}
+
+/*
  * PCI Device registration data structures.
  */
 #define CH_DEVICE(devid, idx) \
@@ -2906,6 +2934,7 @@
 	.id_table	= cxgb4vf_pci_tbl,
 	.probe		= cxgb4vf_pci_probe,
 	.remove		= __devexit_p(cxgb4vf_pci_remove),
+	.shutdown	= __devexit_p(cxgb4vf_pci_shutdown),
 };
 
 /*
@@ -2915,14 +2944,25 @@
 {
 	int ret;
 
+	/*
+	 * Vet our module parameters.
+	 */
+	if (msi != MSI_MSIX && msi != MSI_MSI) {
+		printk(KERN_WARNING KBUILD_MODNAME
+		       ": bad module parameter msi=%d; must be %d"
+		       " (MSI-X or MSI) or %d (MSI)\n",
+		       msi, MSI_MSIX, MSI_MSI);
+		return -EINVAL;
+	}
+
 	/* Debugfs support is optional, just warn if this fails */
 	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
-	if (!cxgb4vf_debugfs_root)
+	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
 		printk(KERN_WARNING KBUILD_MODNAME ": could not create"
 		       " debugfs entry, continuing\n");
 
 	ret = pci_register_driver(&cxgb4vf_driver);
-	if (ret < 0)
+	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
 		debugfs_remove(cxgb4vf_debugfs_root);
 	return ret;
 }
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index 0f51c80..192db22 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -171,7 +171,7 @@
 	delay_idx = 0;
 	ms = delay[0];
 
-	for (i = 0; i < 500; i += ms) {
+	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
 		if (sleep_ok) {
 			ms = delay[delay_idx];
 			if (delay_idx < ARRAY_SIZE(delay) - 1)
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 1b48b68..8b0084d 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1094,7 +1094,7 @@
 				}
 			}
 			/* Change buffer ownership for this last frame, back to the adapter */
-			for (; lp->rx_old != entry; lp->rx_old = (++lp->rx_old) & lp->rxRingMask) {
+			for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
 				writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
 			}
 			writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
@@ -1103,7 +1103,7 @@
 		/*
 		   ** Update entry information
 		 */
-		lp->rx_new = (++lp->rx_new) & lp->rxRingMask;
+		lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
 	}
 
 	return 0;
@@ -1148,7 +1148,7 @@
 		}
 
 		/* Update all the pointers */
-		lp->tx_old = (++lp->tx_old) & lp->txRingMask;
+		lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
 	}
 
 	return 0;
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index e1a8216..c05db60 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1753,8 +1753,6 @@
 
 	/* Free all the skbuffs in the queue. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		np->rx_ring[i].status = 0;
-		np->rx_ring[i].fraginfo = 0;
 		skb = np->rx_skbuff[i];
 		if (skb) {
 			pci_unmap_single(np->pdev,
@@ -1763,6 +1761,8 @@
 			dev_kfree_skb (skb);
 			np->rx_skbuff[i] = NULL;
 		}
+		np->rx_ring[i].status = 0;
+		np->rx_ring[i].fraginfo = 0;
 	}
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		skb = np->tx_skbuff[i];
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index aed223b..7501d97 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -124,6 +124,7 @@
 	case M88E1000_I_PHY_ID:
 	case M88E1011_I_PHY_ID:
 	case M88E1111_I_PHY_ID:
+	case M88E1118_E_PHY_ID:
 		hw->phy_type = e1000_phy_m88;
 		break;
 	case IGP01E1000_I_PHY_ID:
@@ -3222,7 +3223,8 @@
 		break;
 	case e1000_ce4100:
 		if ((hw->phy_id == RTL8211B_PHY_ID) ||
-		    (hw->phy_id == RTL8201N_PHY_ID))
+		    (hw->phy_id == RTL8201N_PHY_ID) ||
+		    (hw->phy_id == M88E1118_E_PHY_ID))
 			match = true;
 		break;
 	case e1000_82541:
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 196eeda..c70b23d 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -2917,6 +2917,7 @@
 #define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
 #define M88E1011_I_REV_4   0x04
 #define M88E1111_I_PHY_ID  0x01410CC0
+#define M88E1118_E_PHY_ID  0x01410E40
 #define L1LXT971A_PHY_ID   0x001378E0
 
 #define RTL8211B_PHY_ID    0x001CC910
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index e610e13..00bf595 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -364,6 +364,7 @@
 	/* structs defined in e1000_hw.h */
 	struct e1000_hw hw;
 
+	spinlock_t stats64_lock;
 	struct e1000_hw_stats stats;
 	struct e1000_phy_info phy_info;
 	struct e1000_phy_stats phy_stats;
@@ -494,7 +495,9 @@
 extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
 extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
 extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
-extern void e1000e_update_stats(struct e1000_adapter *adapter);
+extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+                                                    struct rtnl_link_stats64
+                                                    *stats);
 extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index fa08b63..65ef9b5 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -46,15 +46,15 @@
 };
 
 #define E1000_STAT(str, m) { \
-			.stat_string = str, \
-			.type = E1000_STATS, \
-			.sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
-			.stat_offset = offsetof(struct e1000_adapter, m) }
+		.stat_string = str, \
+		.type = E1000_STATS, \
+		.sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
+		.stat_offset = offsetof(struct e1000_adapter, m) }
 #define E1000_NETDEV_STAT(str, m) { \
-			.stat_string = str, \
-			.type = NETDEV_STATS, \
-			.sizeof_stat = sizeof(((struct net_device *)0)->m), \
-			.stat_offset = offsetof(struct net_device, m) }
+		.stat_string = str, \
+		.type = NETDEV_STATS, \
+		.sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+		.stat_offset = offsetof(struct rtnl_link_stats64, m) }
 
 static const struct e1000_stats e1000_gstrings_stats[] = {
 	E1000_STAT("rx_packets", stats.gprc),
@@ -65,21 +65,21 @@
 	E1000_STAT("tx_broadcast", stats.bptc),
 	E1000_STAT("rx_multicast", stats.mprc),
 	E1000_STAT("tx_multicast", stats.mptc),
-	E1000_NETDEV_STAT("rx_errors", stats.rx_errors),
-	E1000_NETDEV_STAT("tx_errors", stats.tx_errors),
-	E1000_NETDEV_STAT("tx_dropped", stats.tx_dropped),
+	E1000_NETDEV_STAT("rx_errors", rx_errors),
+	E1000_NETDEV_STAT("tx_errors", tx_errors),
+	E1000_NETDEV_STAT("tx_dropped", tx_dropped),
 	E1000_STAT("multicast", stats.mprc),
 	E1000_STAT("collisions", stats.colc),
-	E1000_NETDEV_STAT("rx_length_errors", stats.rx_length_errors),
-	E1000_NETDEV_STAT("rx_over_errors", stats.rx_over_errors),
+	E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
+	E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
 	E1000_STAT("rx_crc_errors", stats.crcerrs),
-	E1000_NETDEV_STAT("rx_frame_errors", stats.rx_frame_errors),
+	E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
 	E1000_STAT("rx_no_buffer_count", stats.rnbc),
 	E1000_STAT("rx_missed_errors", stats.mpc),
 	E1000_STAT("tx_aborted_errors", stats.ecol),
 	E1000_STAT("tx_carrier_errors", stats.tncrs),
-	E1000_NETDEV_STAT("tx_fifo_errors", stats.tx_fifo_errors),
-	E1000_NETDEV_STAT("tx_heartbeat_errors", stats.tx_heartbeat_errors),
+	E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
+	E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
 	E1000_STAT("tx_window_errors", stats.latecol),
 	E1000_STAT("tx_abort_late_coll", stats.latecol),
 	E1000_STAT("tx_deferred_ok", stats.dc),
@@ -684,20 +684,13 @@
 	rx_old = adapter->rx_ring;
 
 	err = -ENOMEM;
-	tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+	tx_ring = kmemdup(tx_old, sizeof(struct e1000_ring), GFP_KERNEL);
 	if (!tx_ring)
 		goto err_alloc_tx;
-	/*
-	 * use a memcpy to save any previously configured
-	 * items like napi structs from having to be
-	 * reinitialized
-	 */
-	memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));
 
-	rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+	rx_ring = kmemdup(rx_old, sizeof(struct e1000_ring), GFP_KERNEL);
 	if (!rx_ring)
 		goto err_alloc_rx;
-	memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));
 
 	adapter->tx_ring = tx_ring;
 	adapter->rx_ring = rx_ring;
@@ -1255,7 +1248,6 @@
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl_reg = 0;
-	u32 stat_reg = 0;
 	u16 phy_reg = 0;
 	s32 ret_val = 0;
 
@@ -1363,8 +1355,7 @@
 		 * Set the ILOS bit on the fiber Nic if half duplex link is
 		 * detected.
 		 */
-		stat_reg = er32(STATUS);
-		if ((stat_reg & E1000_STATUS_FD) == 0)
+		if ((er32(STATUS) & E1000_STATUS_FD) == 0)
 			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
 	}
 
@@ -1972,8 +1963,15 @@
 static int e1000_nway_reset(struct net_device *netdev)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	if (netif_running(netdev))
-		e1000e_reinit_locked(adapter);
+
+	if (!netif_running(netdev))
+		return -EAGAIN;
+
+	if (!adapter->hw.mac.autoneg)
+		return -EINVAL;
+
+	e1000e_reinit_locked(adapter);
+
 	return 0;
 }
 
@@ -1982,14 +1980,15 @@
 				    u64 *data)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct rtnl_link_stats64 net_stats;
 	int i;
 	char *p = NULL;
 
-	e1000e_update_stats(adapter);
+	e1000e_get_stats64(netdev, &net_stats);
 	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
 		switch (e1000_gstrings_stats[i].type) {
 		case NETDEV_STATS:
-			p = (char *) netdev +
+			p = (char *) &net_stats +
 					e1000_gstrings_stats[i].stat_offset;
 			break;
 		case E1000_STATS:
@@ -2014,7 +2013,7 @@
 
 	switch (stringset) {
 	case ETH_SS_TEST:
-		memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test));
+		memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
 		break;
 	case ETH_SS_STATS:
 		for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index fb46974..232b42b 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -2104,7 +2104,6 @@
 {
 	union ich8_hws_flash_status hsfsts;
 	s32 ret_val = -E1000_ERR_NVM;
-	s32 i = 0;
 
 	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
 
@@ -2140,6 +2139,8 @@
 		ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
 		ret_val = 0;
 	} else {
+		s32 i = 0;
+
 		/*
 		 * Otherwise poll for sometime so the current
 		 * cycle has a chance to end before giving up.
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 68aa174..96921de 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1978,15 +1978,15 @@
 {
 	struct e1000_nvm_info *nvm = &hw->nvm;
 	u32 eecd = er32(EECD);
-	u16 timeout = 0;
 	u8 spi_stat_reg;
 
 	if (nvm->type == e1000_nvm_eeprom_spi) {
+		u16 timeout = NVM_MAX_RETRY_SPI;
+
 		/* Clear SK and CS */
 		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
 		ew32(EECD, eecd);
 		udelay(1);
-		timeout = NVM_MAX_RETRY_SPI;
 
 		/*
 		 * Read "Status Register" repeatedly until the LSB is cleared.
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 1c18f26..ec0b803 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -900,8 +900,6 @@
 
 	adapter->total_rx_bytes += total_rx_bytes;
 	adapter->total_rx_packets += total_rx_packets;
-	netdev->stats.rx_bytes += total_rx_bytes;
-	netdev->stats.rx_packets += total_rx_packets;
 	return cleaned;
 }
 
@@ -937,6 +935,9 @@
 	u16 phy_status, phy_1000t_status, phy_ext_status;
 	u16 pci_status;
 
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	e1e_rphy(hw, PHY_STATUS, &phy_status);
 	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
 	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1057,8 +1058,6 @@
 	}
 	adapter->total_tx_bytes += total_tx_bytes;
 	adapter->total_tx_packets += total_tx_packets;
-	netdev->stats.tx_bytes += total_tx_bytes;
-	netdev->stats.tx_packets += total_tx_packets;
 	return count < tx_ring->count;
 }
 
@@ -1245,8 +1244,6 @@
 
 	adapter->total_rx_bytes += total_rx_bytes;
 	adapter->total_rx_packets += total_rx_packets;
-	netdev->stats.rx_bytes += total_rx_bytes;
-	netdev->stats.rx_packets += total_rx_packets;
 	return cleaned;
 }
 
@@ -1426,8 +1423,6 @@
 
 	adapter->total_rx_bytes += total_rx_bytes;
 	adapter->total_rx_packets += total_rx_packets;
-	netdev->stats.rx_bytes += total_rx_bytes;
-	netdev->stats.rx_packets += total_rx_packets;
 	return cleaned;
 }
 
@@ -1506,6 +1501,9 @@
 	struct e1000_adapter *adapter = container_of(work,
 					struct e1000_adapter, downshift_task);
 
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
 }
 
@@ -1851,7 +1849,9 @@
 	int err = 0, vector = 0;
 
 	if (strlen(netdev->name) < (IFNAMSIZ - 5))
-		sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
+		snprintf(adapter->rx_ring->name,
+			 sizeof(adapter->rx_ring->name) - 1,
+			 "%s-rx-0", netdev->name);
 	else
 		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
 	err = request_irq(adapter->msix_entries[vector].vector,
@@ -1864,7 +1864,9 @@
 	vector++;
 
 	if (strlen(netdev->name) < (IFNAMSIZ - 5))
-		sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
+		snprintf(adapter->tx_ring->name,
+			 sizeof(adapter->tx_ring->name) - 1,
+			 "%s-tx-0", netdev->name);
 	else
 		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
 	err = request_irq(adapter->msix_entries[vector].vector,
@@ -2728,7 +2730,6 @@
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl, rfctl;
-	u32 psrctl = 0;
 	u32 pages = 0;
 
 	/* Workaround Si errata on 82579 - configure jumbo frame flow */
@@ -2827,6 +2828,8 @@
 		adapter->rx_ps_pages = 0;
 
 	if (adapter->rx_ps_pages) {
+		u32 psrctl = 0;
+
 		/* Configure extra packet-split registers */
 		rfctl = er32(RFCTL);
 		rfctl |= E1000_RFCTL_EXTEN;
@@ -3028,7 +3031,6 @@
 	struct netdev_hw_addr *ha;
 	u8  *mta_list;
 	u32 rctl;
-	int i;
 
 	/* Check for Promiscuous and All Multicast modes */
 
@@ -3051,12 +3053,13 @@
 	ew32(RCTL, rctl);
 
 	if (!netdev_mc_empty(netdev)) {
+		int i = 0;
+
 		mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
 		if (!mta_list)
 			return;
 
 		/* prepare a packed array of only addresses. */
-		i = 0;
 		netdev_for_each_mc_addr(ha, netdev)
 			memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
 
@@ -3338,6 +3341,23 @@
 	return 0;
 }
 
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (!(adapter->flags2 & FLAG2_DMA_BURST))
+		return;
+
+	/* flush pending descriptor writebacks to memory */
+	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+	/* execute the writes immediately */
+	e1e_flush();
+}
+
+static void e1000e_update_stats(struct e1000_adapter *adapter);
+
 void e1000e_down(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
@@ -3372,11 +3392,19 @@
 	del_timer_sync(&adapter->phy_info_timer);
 
 	netif_carrier_off(netdev);
+
+	spin_lock(&adapter->stats64_lock);
+	e1000e_update_stats(adapter);
+	spin_unlock(&adapter->stats64_lock);
+
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
 
 	if (!pci_channel_offline(adapter->pdev))
 		e1000e_reset(adapter);
+
+	e1000e_flush_descriptors(adapter);
+
 	e1000_clean_tx_ring(adapter);
 	e1000_clean_rx_ring(adapter);
 
@@ -3413,6 +3441,8 @@
 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
+	spin_lock_init(&adapter->stats64_lock);
+
 	e1000e_set_interrupt_capability(adapter);
 
 	if (e1000_alloc_queues(adapter))
@@ -3765,6 +3795,10 @@
 {
 	struct e1000_adapter *adapter = container_of(work,
 					struct e1000_adapter, update_phy_task);
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	e1000_get_phy_info(&adapter->hw);
 }
 
@@ -3775,6 +3809,10 @@
 static void e1000_update_phy_info(unsigned long data)
 {
 	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	schedule_work(&adapter->update_phy_task);
 }
 
@@ -3886,7 +3924,7 @@
  * e1000e_update_stats - Update the board statistics counters
  * @adapter: board private structure
  **/
-void e1000e_update_stats(struct e1000_adapter *adapter)
+static void e1000e_update_stats(struct e1000_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct e1000_hw *hw = &adapter->hw;
@@ -3998,10 +4036,11 @@
 {
 	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_phy_regs *phy = &adapter->phy_regs;
-	int ret_val;
 
 	if ((er32(STATUS) & E1000_STATUS_LU) &&
 	    (adapter->hw.phy.media_type == e1000_media_type_copper)) {
+		int ret_val;
+
 		ret_val  = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
 		ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
 		ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
@@ -4147,7 +4186,9 @@
 	struct e1000_ring *tx_ring = adapter->tx_ring;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 link, tctl;
-	int tx_pending = 0;
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
 
 	link = e1000e_has_link(adapter);
 	if ((netif_carrier_ok(netdev)) && link) {
@@ -4285,7 +4326,9 @@
 	}
 
 link_up:
+	spin_lock(&adapter->stats64_lock);
 	e1000e_update_stats(adapter);
+	spin_unlock(&adapter->stats64_lock);
 
 	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
 	adapter->tpt_old = adapter->stats.tpt;
@@ -4299,21 +4342,17 @@
 
 	e1000e_update_adaptive(&adapter->hw);
 
-	if (!netif_carrier_ok(netdev)) {
-		tx_pending = (e1000_desc_unused(tx_ring) + 1 <
-			       tx_ring->count);
-		if (tx_pending) {
-			/*
-			 * We've lost link, so the controller stops DMA,
-			 * but we've got queued Tx work that's never going
-			 * to get done, so reset controller to flush Tx.
-			 * (Do the reset outside of interrupt context).
-			 */
-			adapter->tx_timeout_count++;
-			schedule_work(&adapter->reset_task);
-			/* return immediately since reset is imminent */
-			return;
-		}
+	if (!netif_carrier_ok(netdev) &&
+	    (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
+		/*
+		 * We've lost link, so the controller stops DMA,
+		 * but we've got queued Tx work that's never going
+		 * to get done, so reset controller to flush Tx.
+		 * (Do the reset outside of interrupt context).
+		 */
+		schedule_work(&adapter->reset_task);
+		/* return immediately since reset is imminent */
+		return;
 	}
 
 	/* Simple mode for Interrupt Throttle Rate (ITR) */
@@ -4338,19 +4377,12 @@
 	else
 		ew32(ICS, E1000_ICS_RXDMT0);
 
+	/* flush pending descriptors to memory before detecting Tx hang */
+	e1000e_flush_descriptors(adapter);
+
 	/* Force detection of hung controller every watchdog period */
 	adapter->detect_tx_hung = 1;
 
-	/* flush partial descriptors to memory before detecting Tx hang */
-	if (adapter->flags2 & FLAG2_DMA_BURST) {
-		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
-		ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
-		/*
-		 * no need to flush the writes because the timeout code does
-		 * an er32 first thing
-		 */
-	}
-
 	/*
 	 * With 82571 controllers, LAA may be overwritten due to controller
 	 * reset from the other port. Set the appropriate LAA in RAR[0]
@@ -4384,13 +4416,13 @@
 	u32 cmd_length = 0;
 	u16 ipcse = 0, tucse, mss;
 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
-	int err;
 
 	if (!skb_is_gso(skb))
 		return 0;
 
 	if (skb_header_cloned(skb)) {
-		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+
 		if (err)
 			return err;
 	}
@@ -4888,6 +4920,10 @@
 	struct e1000_adapter *adapter;
 	adapter = container_of(work, struct e1000_adapter, reset_task);
 
+	/* don't run the task if already down */
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
 	if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
 	      (adapter->flags & FLAG_RX_RESTART_NOW))) {
 		e1000e_dump(adapter);
@@ -4897,16 +4933,55 @@
 }
 
 /**
- * e1000_get_stats - Get System Network Statistics
+ * e1000_get_stats64 - Get System Network Statistics
  * @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
  *
  * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
  **/
-static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+                                             struct rtnl_link_stats64 *stats)
 {
-	/* only return the current stats */
-	return &netdev->stats;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	memset(stats, 0, sizeof(struct rtnl_link_stats64));
+	spin_lock(&adapter->stats64_lock);
+	e1000e_update_stats(adapter);
+	/* Fill out the OS statistics structure */
+	stats->rx_bytes = adapter->stats.gorc;
+	stats->rx_packets = adapter->stats.gprc;
+	stats->tx_bytes = adapter->stats.gotc;
+	stats->tx_packets = adapter->stats.gptc;
+	stats->multicast = adapter->stats.mprc;
+	stats->collisions = adapter->stats.colc;
+
+	/* Rx Errors */
+
+	/*
+	 * RLEC on some newer hardware can be incorrect, so build
+	 * our own version based on RUC and ROC
+	 */
+	stats->rx_errors = adapter->stats.rxerrc +
+		adapter->stats.crcerrs + adapter->stats.algnerrc +
+		adapter->stats.ruc + adapter->stats.roc +
+		adapter->stats.cexterr;
+	stats->rx_length_errors = adapter->stats.ruc +
+					      adapter->stats.roc;
+	stats->rx_crc_errors = adapter->stats.crcerrs;
+	stats->rx_frame_errors = adapter->stats.algnerrc;
+	stats->rx_missed_errors = adapter->stats.mpc;
+
+	/* Tx Errors */
+	stats->tx_errors = adapter->stats.ecol +
+				       adapter->stats.latecol;
+	stats->tx_aborted_errors = adapter->stats.ecol;
+	stats->tx_window_errors = adapter->stats.latecol;
+	stats->tx_carrier_errors = adapter->stats.tncrs;
+
+	/* Tx Dropped needs to be maintained elsewhere */
+
+	spin_unlock(&adapter->stats64_lock);
+	return stats;
 }
 
 /**
@@ -5476,9 +5551,10 @@
 {
 	struct net_device *netdev = data;
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	int vector, msix_irq;
 
 	if (adapter->msix_entries) {
+		int vector, msix_irq;
+
 		vector = 0;
 		msix_irq = adapter->msix_entries[vector].vector;
 		disable_irq(msix_irq);
@@ -5675,7 +5751,7 @@
 	.ndo_open		= e1000_open,
 	.ndo_stop		= e1000_close,
 	.ndo_start_xmit		= e1000_xmit_frame,
-	.ndo_get_stats		= e1000_get_stats,
+	.ndo_get_stats64	= e1000e_get_stats64,
 	.ndo_set_multicast_list	= e1000_set_multi,
 	.ndo_set_mac_address	= e1000_set_mac,
 	.ndo_change_mtu		= e1000_change_mtu,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 6bea051..6ae31fc 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -2409,9 +2409,7 @@
 s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
 {
 	s32 ret_val;
-	u32 page_select = 0;
 	u32 page = offset >> IGP_PAGE_SHIFT;
-	u32 page_shift = 0;
 
 	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
@@ -2427,6 +2425,8 @@
 	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
 
 	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		u32 page_shift, page_select;
+
 		/*
 		 * Page select is register 31 for phy address 1 and 22 for
 		 * phy address 2 and 3. Page select is shifted only for
@@ -2468,9 +2468,7 @@
 s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
 {
 	s32 ret_val;
-	u32 page_select = 0;
 	u32 page = offset >> IGP_PAGE_SHIFT;
-	u32 page_shift = 0;
 
 	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
@@ -2486,6 +2484,8 @@
 	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
 
 	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		u32 page_shift, page_select;
+
 		/*
 		 * Page select is register 31 for phy address 1 and 22 for
 		 * phy address 2 and 3. Page select is shifted only for
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 112c5aa..907b05a 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -812,7 +812,7 @@
 	if (netif_msg_hw(priv))
 		printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n",
 			 endptr + 1);
-	enc28j60_mem_read(priv, endptr + 1, sizeof(tsv), tsv);
+	enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
 }
 
 static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index e7b6c31..2e573be 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,5 +1,5 @@
 obj-$(CONFIG_ENIC) := enic.o
 
 enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
-	enic_res.o vnic_dev.o vnic_rq.o vnic_vic.o
+	enic_res.o enic_dev.o vnic_dev.o vnic_rq.o vnic_vic.o
 
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index a937f49..aee5256 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,13 +32,13 @@
 
 #define DRV_NAME		"enic"
 #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION		"1.4.1.10"
-#define DRV_COPYRIGHT		"Copyright 2008-2010 Cisco Systems, Inc"
+#define DRV_VERSION		"2.1.1.9"
+#define DRV_COPYRIGHT		"Copyright 2008-2011 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX		6
 
-#define ENIC_WQ_MAX		8
-#define ENIC_RQ_MAX		8
+#define ENIC_WQ_MAX		1
+#define ENIC_RQ_MAX		1
 #define ENIC_CQ_MAX		(ENIC_WQ_MAX + ENIC_RQ_MAX)
 #define ENIC_INTR_MAX		(ENIC_CQ_MAX + 2)
 
@@ -49,7 +49,7 @@
 	void *devid;
 };
 
-#define ENIC_SET_APPLIED		(1 << 0)
+#define ENIC_PORT_REQUEST_APPLIED	(1 << 0)
 #define ENIC_SET_REQUEST		(1 << 1)
 #define ENIC_SET_NAME			(1 << 2)
 #define ENIC_SET_INSTANCE		(1 << 3)
@@ -101,7 +101,6 @@
 	/* receive queue cache line section */
 	____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
 	unsigned int rq_count;
-	int (*rq_alloc_buf)(struct vnic_rq *rq);
 	u64 rq_truncated_pkts;
 	u64 rq_bad_fcs;
 	struct napi_struct napi[ENIC_RQ_MAX];
diff --git a/drivers/net/enic/enic_dev.c b/drivers/net/enic/enic_dev.c
new file mode 100644
index 0000000..37ad3a1
--- /dev/null
+++ b/drivers/net/enic/enic_dev.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+
+#include "vnic_dev.h"
+#include "vnic_vic.h"
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_dev.h"
+
+int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_fw_info(enic->vdev, fw_info);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_stats_dump(enic->vdev, vstats);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+int enic_dev_add_station_addr(struct enic *enic)
+{
+	int err;
+
+	if (!is_valid_ether_addr(enic->netdev->dev_addr))
+		return -EADDRNOTAVAIL;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+int enic_dev_del_station_addr(struct enic *enic)
+{
+	int err;
+
+	if (!is_valid_ether_addr(enic->netdev->dev_addr))
+		return -EADDRNOTAVAIL;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
+	int broadcast, int promisc, int allmulti)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_packet_filter(enic->vdev, directed,
+		multicast, broadcast, promisc, allmulti);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+int enic_dev_add_addr(struct enic *enic, u8 *addr)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_add_addr(enic->vdev, addr);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+int enic_dev_del_addr(struct enic *enic, u8 *addr)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_del_addr(enic->vdev, addr);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+int enic_dev_notify_unset(struct enic *enic)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_notify_unset(enic->vdev);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+int enic_dev_hang_notify(struct enic *enic)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_hang_notify(enic->vdev);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
+		IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+int enic_dev_enable(struct enic *enic)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_enable_wait(enic->vdev);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+int enic_dev_disable(struct enic *enic)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_disable(enic->vdev);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+int enic_vnic_dev_deinit(struct enic *enic)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_deinit(enic->vdev);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_init_prov(enic->vdev,
+		(u8 *)vp, vic_provinfo_size(vp));
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+int enic_dev_init_done(struct enic *enic, int *done, int *error)
+{
+	int err;
+
+	spin_lock(&enic->devcmd_lock);
+	err = vnic_dev_init_done(enic->vdev, done, error);
+	spin_unlock(&enic->devcmd_lock);
+
+	return err;
+}
+
+/* rtnl lock is held */
+void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	spin_lock(&enic->devcmd_lock);
+	enic_add_vlan(enic, vid);
+	spin_unlock(&enic->devcmd_lock);
+}
+
+/* rtnl lock is held */
+void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	spin_lock(&enic->devcmd_lock);
+	enic_del_vlan(enic, vid);
+	spin_unlock(&enic->devcmd_lock);
+}
diff --git a/drivers/net/enic/enic_dev.h b/drivers/net/enic/enic_dev.h
new file mode 100644
index 0000000..495f57f
--- /dev/null
+++ b/drivers/net/enic/enic_dev.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc.  All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ENIC_DEV_H_
+#define _ENIC_DEV_H_
+
+int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info);
+int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats);
+int enic_dev_add_station_addr(struct enic *enic);
+int enic_dev_del_station_addr(struct enic *enic);
+int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
+	int broadcast, int promisc, int allmulti);
+int enic_dev_add_addr(struct enic *enic, u8 *addr);
+int enic_dev_del_addr(struct enic *enic, u8 *addr);
+void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+int enic_dev_notify_unset(struct enic *enic);
+int enic_dev_hang_notify(struct enic *enic);
+int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
+int enic_dev_enable(struct enic *enic);
+int enic_dev_disable(struct enic *enic);
+int enic_vnic_dev_deinit(struct enic *enic);
+int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp);
+int enic_dev_init_done(struct enic *enic, int *done, int *error);
+
+#endif /* _ENIC_DEV_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a0af48c..4f1710e 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -44,6 +44,7 @@
 #include "vnic_vic.h"
 #include "enic_res.h"
 #include "enic.h"
+#include "enic_dev.h"
 
 #define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
 #define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
@@ -190,18 +191,6 @@
 	return 0;
 }
 
-static int enic_dev_fw_info(struct enic *enic,
-	struct vnic_devcmd_fw_info **fw_info)
-{
-	int err;
-
-	spin_lock(&enic->devcmd_lock);
-	err = vnic_dev_fw_info(enic->vdev, fw_info);
-	spin_unlock(&enic->devcmd_lock);
-
-	return err;
-}
-
 static void enic_get_drvinfo(struct net_device *netdev,
 	struct ethtool_drvinfo *drvinfo)
 {
@@ -246,17 +235,6 @@
 	}
 }
 
-static int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
-{
-	int err;
-
-	spin_lock(&enic->devcmd_lock);
-	err = vnic_dev_stats_dump(enic->vdev, vstats);
-	spin_unlock(&enic->devcmd_lock);
-
-	return err;
-}
-
 static void enic_get_ethtool_stats(struct net_device *netdev,
 	struct ethtool_stats *stats, u64 *data)
 {
@@ -896,9 +874,10 @@
 	return net_stats;
 }
 
-static void enic_reset_multicast_list(struct enic *enic)
+static void enic_reset_addr_lists(struct enic *enic)
 {
 	enic->mc_count = 0;
+	enic->uc_count = 0;
 	enic->flags = 0;
 }
 
@@ -919,32 +898,6 @@
 	return 0;
 }
 
-static int enic_dev_add_station_addr(struct enic *enic)
-{
-	int err = 0;
-
-	if (is_valid_ether_addr(enic->netdev->dev_addr)) {
-		spin_lock(&enic->devcmd_lock);
-		err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
-		spin_unlock(&enic->devcmd_lock);
-	}
-
-	return err;
-}
-
-static int enic_dev_del_station_addr(struct enic *enic)
-{
-	int err = 0;
-
-	if (is_valid_ether_addr(enic->netdev->dev_addr)) {
-		spin_lock(&enic->devcmd_lock);
-		err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
-		spin_unlock(&enic->devcmd_lock);
-	}
-
-	return err;
-}
-
 static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
 {
 	struct enic *enic = netdev_priv(netdev);
@@ -989,42 +942,7 @@
 	return enic_dev_add_station_addr(enic);
 }
 
-static int enic_dev_packet_filter(struct enic *enic, int directed,
-	int multicast, int broadcast, int promisc, int allmulti)
-{
-	int err;
-
-	spin_lock(&enic->devcmd_lock);
-	err = vnic_dev_packet_filter(enic->vdev, directed,
-		multicast, broadcast, promisc, allmulti);
-	spin_unlock(&enic->devcmd_lock);
-
-	return err;
-}
-
-static int enic_dev_add_addr(struct enic *enic, u8 *addr)
-{
-	int err;
-
-	spin_lock(&enic->devcmd_lock);
-	err = vnic_dev_add_addr(enic->vdev, addr);
-	spin_unlock(&enic->devcmd_lock);
-
-	return err;
-}
-
-static int enic_dev_del_addr(struct enic *enic, u8 *addr)
-{
-	int err;
-
-	spin_lock(&enic->devcmd_lock);
-	err = vnic_dev_del_addr(enic->vdev, addr);
-	spin_unlock(&enic->devcmd_lock);
-
-	return err;
-}
-
-static void enic_add_multicast_addr_list(struct enic *enic)
+static void enic_update_multicast_addr_list(struct enic *enic)
 {
 	struct net_device *netdev = enic->netdev;
 	struct netdev_hw_addr *ha;
@@ -1079,7 +997,7 @@
 	enic->mc_count = mc_count;
 }
 
-static void enic_add_unicast_addr_list(struct enic *enic)
+static void enic_update_unicast_addr_list(struct enic *enic)
 {
 	struct net_device *netdev = enic->netdev;
 	struct netdev_hw_addr *ha;
@@ -1156,9 +1074,9 @@
 	}
 
 	if (!promisc) {
-		enic_add_unicast_addr_list(enic);
+		enic_update_unicast_addr_list(enic);
 		if (!allmulti)
-			enic_add_multicast_addr_list(enic);
+			enic_update_multicast_addr_list(enic);
 	}
 }
 
@@ -1170,26 +1088,6 @@
 	enic->vlan_group = vlan_group;
 }
 
-/* rtnl lock is held */
-static void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
-{
-	struct enic *enic = netdev_priv(netdev);
-
-	spin_lock(&enic->devcmd_lock);
-	enic_add_vlan(enic, vid);
-	spin_unlock(&enic->devcmd_lock);
-}
-
-/* rtnl lock is held */
-static void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
-{
-	struct enic *enic = netdev_priv(netdev);
-
-	spin_lock(&enic->devcmd_lock);
-	enic_del_vlan(enic, vid);
-	spin_unlock(&enic->devcmd_lock);
-}
-
 /* netif_tx_lock held, BHs disabled */
 static void enic_tx_timeout(struct net_device *netdev)
 {
@@ -1197,40 +1095,6 @@
 	schedule_work(&enic->reset);
 }
 
-static int enic_vnic_dev_deinit(struct enic *enic)
-{
-	int err;
-
-	spin_lock(&enic->devcmd_lock);
-	err = vnic_dev_deinit(enic->vdev);
-	spin_unlock(&enic->devcmd_lock);
-
-	return err;
-}
-
-static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
-{
-	int err;
-
-	spin_lock(&enic->devcmd_lock);
-	err = vnic_dev_init_prov(enic->vdev,
-		(u8 *)vp, vic_provinfo_size(vp));
-	spin_unlock(&enic->devcmd_lock);
-
-	return err;
-}
-
-static int enic_dev_init_done(struct enic *enic, int *done, int *error)
-{
-	int err;
-
-	spin_lock(&enic->devcmd_lock);
-	err = vnic_dev_init_done(enic->vdev, done, error);
-	spin_unlock(&enic->devcmd_lock);
-
-	return err;
-}
-
 static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 {
 	struct enic *enic = netdev_priv(netdev);
@@ -1318,18 +1182,20 @@
 		vic_provinfo_free(vp);
 		if (err)
 			return err;
-
-		enic->pp.set |= ENIC_SET_APPLIED;
 		break;
 
 	case PORT_REQUEST_DISASSOCIATE:
-		enic->pp.set &= ~ENIC_SET_APPLIED;
 		break;
 
 	default:
 		return -EINVAL;
 	}
 
+	/* Set flag to indicate that the port assoc/disassoc
+	 * request has been sent out to fw
+	 */
+	enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
+
 	return 0;
 }
 
@@ -1379,9 +1245,6 @@
 
 		if (is_zero_ether_addr(netdev->dev_addr))
 			random_ether_addr(netdev->dev_addr);
-	} else if (new_pp.request == PORT_REQUEST_DISASSOCIATE) {
-		if (!is_zero_ether_addr(enic->pp.mac_addr))
-			enic_dev_del_addr(enic, enic->pp.mac_addr);
 	}
 
 	memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));
@@ -1390,9 +1253,6 @@
 	if (err)
 		goto set_port_profile_cleanup;
 
-	if (!is_zero_ether_addr(enic->pp.mac_addr))
-		enic_dev_add_addr(enic, enic->pp.mac_addr);
-
 set_port_profile_cleanup:
 	memset(enic->pp.vf_mac, 0, ETH_ALEN);
 
@@ -1411,7 +1271,7 @@
 	int err, error, done;
 	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
 
-	if (!(enic->pp.set & ENIC_SET_APPLIED))
+	if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
 		return -ENODATA;
 
 	err = enic_dev_init_done(enic, &done, &error);
@@ -1489,62 +1349,6 @@
 	return 0;
 }
 
-static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
-{
-	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
-
-	if (vnic_rq_posting_soon(rq)) {
-
-		/* SW workaround for A0 HW erratum: if we're just about
-		 * to write posted_index, insert a dummy desc
-		 * of type resvd
-		 */
-
-		rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
-		vnic_rq_post(rq, 0, 0, 0, 0);
-	} else {
-		return enic_rq_alloc_buf(rq);
-	}
-
-	return 0;
-}
-
-static int enic_dev_hw_version(struct enic *enic,
-	enum vnic_dev_hw_version *hw_ver)
-{
-	int err;
-
-	spin_lock(&enic->devcmd_lock);
-	err = vnic_dev_hw_version(enic->vdev, hw_ver);
-	spin_unlock(&enic->devcmd_lock);
-
-	return err;
-}
-
-static int enic_set_rq_alloc_buf(struct enic *enic)
-{
-	enum vnic_dev_hw_version hw_ver;
-	int err;
-
-	err = enic_dev_hw_version(enic, &hw_ver);
-	if (err)
-		return err;
-
-	switch (hw_ver) {
-	case VNIC_DEV_HW_VER_A1:
-		enic->rq_alloc_buf = enic_rq_alloc_buf_a1;
-		break;
-	case VNIC_DEV_HW_VER_A2:
-	case VNIC_DEV_HW_VER_UNKNOWN:
-		enic->rq_alloc_buf = enic_rq_alloc_buf;
-		break;
-	default:
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
 static void enic_rq_indicate_buf(struct vnic_rq *rq,
 	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
 	int skipped, void *opaque)
@@ -1681,7 +1485,7 @@
 			0 /* don't unmask intr */,
 			0 /* don't reset intr timer */);
 
-	err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
 
 	/* Buffer allocation failed. Stay in polling
 	 * mode so we can try to fill the ring again.
@@ -1731,7 +1535,7 @@
 			0 /* don't unmask intr */,
 			0 /* don't reset intr timer */);
 
-	err = vnic_rq_fill(&enic->rq[rq], enic->rq_alloc_buf);
+	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
 
 	/* Buffer allocation failed. Stay in polling mode
 	 * so we can try to fill the ring again.
@@ -1901,39 +1705,6 @@
 	return err;
 }
 
-static int enic_dev_notify_unset(struct enic *enic)
-{
-	int err;
-
-	spin_lock(&enic->devcmd_lock);
-	err = vnic_dev_notify_unset(enic->vdev);
-	spin_unlock(&enic->devcmd_lock);
-
-	return err;
-}
-
-static int enic_dev_enable(struct enic *enic)
-{
-	int err;
-
-	spin_lock(&enic->devcmd_lock);
-	err = vnic_dev_enable_wait(enic->vdev);
-	spin_unlock(&enic->devcmd_lock);
-
-	return err;
-}
-
-static int enic_dev_disable(struct enic *enic)
-{
-	int err;
-
-	spin_lock(&enic->devcmd_lock);
-	err = vnic_dev_disable(enic->vdev);
-	spin_unlock(&enic->devcmd_lock);
-
-	return err;
-}
-
 static void enic_notify_timer_start(struct enic *enic)
 {
 	switch (vnic_dev_get_intr_mode(enic->vdev)) {
@@ -1967,7 +1738,7 @@
 	}
 
 	for (i = 0; i < enic->rq_count; i++) {
-		vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
+		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
 		/* Need at least one buffer on ring to get going */
 		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
 			netdev_err(netdev, "Unable to alloc receive buffers\n");
@@ -2285,29 +2056,6 @@
 		rss_hash_bits, rss_base_cpu, rss_enable);
 }
 
-static int enic_dev_hang_notify(struct enic *enic)
-{
-	int err;
-
-	spin_lock(&enic->devcmd_lock);
-	err = vnic_dev_hang_notify(enic->vdev);
-	spin_unlock(&enic->devcmd_lock);
-
-	return err;
-}
-
-static int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
-{
-	int err;
-
-	spin_lock(&enic->devcmd_lock);
-	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
-		IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
-	spin_unlock(&enic->devcmd_lock);
-
-	return err;
-}
-
 static void enic_reset(struct work_struct *work)
 {
 	struct enic *enic = container_of(work, struct enic, reset);
@@ -2320,7 +2068,7 @@
 	enic_dev_hang_notify(enic);
 	enic_stop(enic->netdev);
 	enic_dev_hang_reset(enic);
-	enic_reset_multicast_list(enic);
+	enic_reset_addr_lists(enic);
 	enic_init_vnic_resources(enic);
 	enic_set_rss_nic_cfg(enic);
 	enic_dev_set_ig_vlan_rewrite_mode(enic);
@@ -2332,7 +2080,7 @@
 static int enic_set_intr_mode(struct enic *enic)
 {
 	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
-	unsigned int m = 1;
+	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
 	unsigned int i;
 
 	/* Set interrupt mode (INTx, MSI, MSI-X) depending
@@ -2475,9 +2223,7 @@
 	.ndo_tx_timeout		= enic_tx_timeout,
 	.ndo_set_vf_port	= enic_set_vf_port,
 	.ndo_get_vf_port	= enic_get_vf_port,
-#ifdef IFLA_VF_MAX
 	.ndo_set_vf_mac		= enic_set_vf_mac,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= enic_poll_controller,
 #endif
@@ -2556,25 +2302,12 @@
 
 	enic_init_vnic_resources(enic);
 
-	err = enic_set_rq_alloc_buf(enic);
-	if (err) {
-		dev_err(dev, "Failed to set RQ buffer allocator, aborting\n");
-		goto err_out_free_vnic_resources;
-	}
-
 	err = enic_set_rss_nic_cfg(enic);
 	if (err) {
 		dev_err(dev, "Failed to config nic, aborting\n");
 		goto err_out_free_vnic_resources;
 	}
 
-	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
-	if (err) {
-		dev_err(dev,
-			"Failed to set ingress vlan rewrite mode, aborting.\n");
-		goto err_out_free_vnic_resources;
-	}
-
 	switch (vnic_dev_get_intr_mode(enic->vdev)) {
 	default:
 		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
@@ -2713,6 +2446,22 @@
 		goto err_out_vnic_unregister;
 	}
 
+	/* Setup devcmd lock
+	 */
+
+	spin_lock_init(&enic->devcmd_lock);
+
+	/*
+	 * Set ingress vlan rewrite mode before vnic initialization
+	 */
+
+	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
+	if (err) {
+		dev_err(dev,
+			"Failed to set ingress vlan rewrite mode, aborting.\n");
+		goto err_out_dev_close;
+	}
+
 	/* Issue device init to initialize the vnic-to-switch link.
 	 * We'll start with carrier off and wait for link UP
 	 * notification later to turn on carrier.  We don't need
@@ -2736,11 +2485,6 @@
 		}
 	}
 
-	/* Setup devcmd lock
-	 */
-
-	spin_lock_init(&enic->devcmd_lock);
-
 	err = enic_dev_init(enic);
 	if (err) {
 		dev_err(dev, "Device initialization failed, aborting\n");
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index fb35d8b..c489e72 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -419,25 +419,6 @@
 	return err;
 }
 
-int vnic_dev_hw_version(struct vnic_dev *vdev, enum vnic_dev_hw_version *hw_ver)
-{
-	struct vnic_devcmd_fw_info *fw_info;
-	int err;
-
-	err = vnic_dev_fw_info(vdev, &fw_info);
-	if (err)
-		return err;
-
-	if (strncmp(fw_info->hw_version, "A1", sizeof("A1")) == 0)
-		*hw_ver = VNIC_DEV_HW_VER_A1;
-	else if (strncmp(fw_info->hw_version, "A2", sizeof("A2")) == 0)
-		*hw_ver = VNIC_DEV_HW_VER_A2;
-	else
-		*hw_ver = VNIC_DEV_HW_VER_UNKNOWN;
-
-	return 0;
-}
-
 int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
 	void *value)
 {
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index 05f9a24..e837546 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -44,12 +44,6 @@
 #undef pr_fmt
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-enum vnic_dev_hw_version {
-	VNIC_DEV_HW_VER_UNKNOWN,
-	VNIC_DEV_HW_VER_A1,
-	VNIC_DEV_HW_VER_A2,
-};
-
 enum vnic_dev_intr_mode {
 	VNIC_DEV_INTR_MODE_UNKNOWN,
 	VNIC_DEV_INTR_MODE_INTX,
@@ -93,8 +87,6 @@
 	u64 *a0, u64 *a1, int wait);
 int vnic_dev_fw_info(struct vnic_dev *vdev,
 	struct vnic_devcmd_fw_info **fw_info);
-int vnic_dev_hw_version(struct vnic_dev *vdev,
-	enum vnic_dev_hw_version *hw_ver);
 int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
 	void *value);
 int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
diff --git a/drivers/net/enic/vnic_rq.h b/drivers/net/enic/vnic_rq.h
index 37f08de..2056586 100644
--- a/drivers/net/enic/vnic_rq.h
+++ b/drivers/net/enic/vnic_rq.h
@@ -141,11 +141,6 @@
 	}
 }
 
-static inline int vnic_rq_posting_soon(struct vnic_rq *rq)
-{
-	return (rq->to_use->index & VNIC_RQ_RETURN_RATE) == 0;
-}
-
 static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
 {
 	rq->ring.desc_avail += count;
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 2a71373..634c0da 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -54,7 +54,7 @@
 
 #include "fec.h"
 
-#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+#if defined(CONFIG_ARM)
 #define FEC_ALIGNMENT	0xf
 #else
 #define FEC_ALIGNMENT	0x3
@@ -147,8 +147,7 @@
  * account when setting it.
  */
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
-    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
 #define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
 #else
 #define	OPT_FRAME_SIZE	0
@@ -183,7 +182,7 @@
 	struct bufdesc	*rx_bd_base;
 	struct bufdesc	*tx_bd_base;
 	/* The next free ring entry */
-	struct bufdesc	*cur_rx, *cur_tx; 
+	struct bufdesc	*cur_rx, *cur_tx;
 	/* The ring entries to be free()ed */
 	struct bufdesc	*dirty_tx;
 
@@ -191,28 +190,21 @@
 	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
 	spinlock_t hw_lock;
 
-	struct  platform_device *pdev;
+	struct	platform_device *pdev;
 
 	int	opened;
 
 	/* Phylib and MDIO interface */
-	struct  mii_bus *mii_bus;
-	struct  phy_device *phy_dev;
-	int     mii_timeout;
-	uint    phy_speed;
+	struct	mii_bus *mii_bus;
+	struct	phy_device *phy_dev;
+	int	mii_timeout;
+	uint	phy_speed;
 	phy_interface_t	phy_interface;
 	int	link;
 	int	full_duplex;
 	struct	completion mdio_done;
 };
 
-static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
-static void fec_enet_tx(struct net_device *dev);
-static void fec_enet_rx(struct net_device *dev);
-static int fec_enet_close(struct net_device *dev);
-static void fec_restart(struct net_device *dev, int duplex);
-static void fec_stop(struct net_device *dev);
-
 /* FEC MII MMFR bits definition */
 #define FEC_MMFR_ST		(1 << 30)
 #define FEC_MMFR_OP_READ	(2 << 28)
@@ -239,9 +231,9 @@
 }
 
 static netdev_tx_t
-fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
 	struct bufdesc *bdp;
@@ -262,9 +254,9 @@
 
 	if (status & BD_ENET_TX_READY) {
 		/* Ooops.  All transmit buffers are full.  Bail out.
-		 * This should not happen, since dev->tbusy should be set.
+		 * This should not happen, since ndev->tbusy should be set.
 		 */
-		printk("%s: tx queue full!.\n", dev->name);
+		printk("%s: tx queue full!.\n", ndev->name);
 		spin_unlock_irqrestore(&fep->hw_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
@@ -284,7 +276,7 @@
 	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
 		unsigned int index;
 		index = bdp - fep->tx_bd_base;
-		memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
+		memcpy(fep->tx_bounce[index], skb->data, skb->len);
 		bufaddr = fep->tx_bounce[index];
 	}
 
@@ -299,13 +291,13 @@
 	/* Save skb pointer */
 	fep->tx_skbuff[fep->skb_cur] = skb;
 
-	dev->stats.tx_bytes += skb->len;
+	ndev->stats.tx_bytes += skb->len;
 	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
 
 	/* Push the data cache so the CPM does not get stale memory
 	 * data.
 	 */
-	bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
+	bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
 			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
 
 	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
@@ -326,7 +318,7 @@
 
 	if (bdp == fep->dirty_tx) {
 		fep->tx_full = 1;
-		netif_stop_queue(dev);
+		netif_stop_queue(ndev);
 	}
 
 	fep->cur_tx = bdp;
@@ -336,62 +328,170 @@
 	return NETDEV_TX_OK;
 }
 
+/* This function is called to start or restart the FEC during a link
+ * change.  This only happens when switching between half and full
+ * duplex.
+ */
 static void
-fec_timeout(struct net_device *dev)
+fec_restart(struct net_device *ndev, int duplex)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
+	int i;
+	u32 temp_mac[2];
+	u32 rcntl = OPT_FRAME_SIZE | 0x04;
 
-	dev->stats.tx_errors++;
+	/* Whack a reset.  We should wait for this. */
+	writel(1, fep->hwp + FEC_ECNTRL);
+	udelay(10);
 
-	fec_restart(dev, fep->full_duplex);
-	netif_wake_queue(dev);
+	/*
+	 * enet-mac reset will reset the MAC address registers too,
+	 * so we need to reconfigure them.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+		memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+	}
+
+	/* Clear any outstanding interrupt. */
+	writel(0xffc00000, fep->hwp + FEC_IEVENT);
+
+	/* Reset all multicast.	*/
+	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+#ifndef CONFIG_M5272
+	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
+
+	/* Set maximum receive buffer size. */
+	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
+
+	/* Set receive and transmit descriptor base. */
+	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
+	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
+			fep->hwp + FEC_X_DES_START);
+
+	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+	fep->cur_rx = fep->rx_bd_base;
+
+	/* Reset SKB transmit buffers. */
+	fep->skb_cur = fep->skb_dirty = 0;
+	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
+		if (fep->tx_skbuff[i]) {
+			dev_kfree_skb_any(fep->tx_skbuff[i]);
+			fep->tx_skbuff[i] = NULL;
+		}
+	}
+
+	/* Enable MII mode */
+	if (duplex) {
+		/* FD enable */
+		writel(0x04, fep->hwp + FEC_X_CNTRL);
+	} else {
+		/* No Rcv on Xmit */
+		rcntl |= 0x02;
+		writel(0x0, fep->hwp + FEC_X_CNTRL);
+	}
+
+	fep->full_duplex = duplex;
+
+	/* Set MII speed */
+	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+	/*
+	 * The phy interface and speed need to get configured
+	 * differently on enet-mac.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+		/* Enable flow control and length check */
+		rcntl |= 0x40000000 | 0x00000020;
+
+		/* MII or RMII */
+		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+			rcntl |= (1 << 8);
+		else
+			rcntl &= ~(1 << 8);
+
+		/* 10M or 100M */
+		if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
+			rcntl &= ~(1 << 9);
+		else
+			rcntl |= (1 << 9);
+
+	} else {
+#ifdef FEC_MIIGSK_ENR
+		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+			/* disable the gasket and wait */
+			writel(0, fep->hwp + FEC_MIIGSK_ENR);
+			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+				udelay(1);
+
+			/*
+			 * configure the gasket:
+			 *   RMII, 50 MHz, no loopback, no echo
+			 */
+			writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+
+			/* re-enable the gasket */
+			writel(2, fep->hwp + FEC_MIIGSK_ENR);
+		}
+#endif
+	}
+	writel(rcntl, fep->hwp + FEC_R_CNTRL);
+
+	/* And last, enable the transmit and receive processing */
+	writel(2, fep->hwp + FEC_ECNTRL);
+	writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+
+	/* Enable interrupts we wish to service */
+	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 }
 
-static irqreturn_t
-fec_enet_interrupt(int irq, void * dev_id)
+static void
+fec_stop(struct net_device *ndev)
 {
-	struct	net_device *dev = dev_id;
-	struct fec_enet_private *fep = netdev_priv(dev);
-	uint	int_events;
-	irqreturn_t ret = IRQ_NONE;
+	struct fec_enet_private *fep = netdev_priv(ndev);
 
-	do {
-		int_events = readl(fep->hwp + FEC_IEVENT);
-		writel(int_events, fep->hwp + FEC_IEVENT);
+	/* We cannot expect a graceful transmit stop without link !!! */
+	if (fep->link) {
+		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
+		udelay(10);
+		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
+			printk("fec_stop : Graceful transmit stop did not complete !\n");
+	}
 
-		if (int_events & FEC_ENET_RXF) {
-			ret = IRQ_HANDLED;
-			fec_enet_rx(dev);
-		}
-
-		/* Transmit OK, or non-fatal error. Update the buffer
-		 * descriptors. FEC handles all errors, we just discover
-		 * them as part of the transmit process.
-		 */
-		if (int_events & FEC_ENET_TXF) {
-			ret = IRQ_HANDLED;
-			fec_enet_tx(dev);
-		}
-
-		if (int_events & FEC_ENET_MII) {
-			ret = IRQ_HANDLED;
-			complete(&fep->mdio_done);
-		}
-	} while (int_events);
-
-	return ret;
+	/* Whack a reset.  We should wait for this. */
+	writel(1, fep->hwp + FEC_ECNTRL);
+	udelay(10);
+	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 }
 
 
 static void
-fec_enet_tx(struct net_device *dev)
+fec_timeout(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	ndev->stats.tx_errors++;
+
+	fec_restart(ndev, fep->full_duplex);
+	netif_wake_queue(ndev);
+}
+
+static void
+fec_enet_tx(struct net_device *ndev)
 {
 	struct	fec_enet_private *fep;
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct	sk_buff	*skb;
 
-	fep = netdev_priv(dev);
+	fep = netdev_priv(ndev);
 	spin_lock(&fep->hw_lock);
 	bdp = fep->dirty_tx;
 
@@ -399,7 +499,8 @@
 		if (bdp == fep->cur_tx && fep->tx_full == 0)
 			break;
 
-		dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
+		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+				FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
 		bdp->cbd_bufaddr = 0;
 
 		skb = fep->tx_skbuff[fep->skb_dirty];
@@ -407,19 +508,19 @@
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
 				   BD_ENET_TX_RL | BD_ENET_TX_UN |
 				   BD_ENET_TX_CSL)) {
-			dev->stats.tx_errors++;
+			ndev->stats.tx_errors++;
 			if (status & BD_ENET_TX_HB)  /* No heartbeat */
-				dev->stats.tx_heartbeat_errors++;
+				ndev->stats.tx_heartbeat_errors++;
 			if (status & BD_ENET_TX_LC)  /* Late collision */
-				dev->stats.tx_window_errors++;
+				ndev->stats.tx_window_errors++;
 			if (status & BD_ENET_TX_RL)  /* Retrans limit */
-				dev->stats.tx_aborted_errors++;
+				ndev->stats.tx_aborted_errors++;
 			if (status & BD_ENET_TX_UN)  /* Underrun */
-				dev->stats.tx_fifo_errors++;
+				ndev->stats.tx_fifo_errors++;
 			if (status & BD_ENET_TX_CSL) /* Carrier lost */
-				dev->stats.tx_carrier_errors++;
+				ndev->stats.tx_carrier_errors++;
 		} else {
-			dev->stats.tx_packets++;
+			ndev->stats.tx_packets++;
 		}
 
 		if (status & BD_ENET_TX_READY)
@@ -429,7 +530,7 @@
 		 * but we eventually sent the packet OK.
 		 */
 		if (status & BD_ENET_TX_DEF)
-			dev->stats.collisions++;
+			ndev->stats.collisions++;
 
 		/* Free the sk buffer associated with this last transmit */
 		dev_kfree_skb_any(skb);
@@ -446,8 +547,8 @@
 		 */
 		if (fep->tx_full) {
 			fep->tx_full = 0;
-			if (netif_queue_stopped(dev))
-				netif_wake_queue(dev);
+			if (netif_queue_stopped(ndev))
+				netif_wake_queue(ndev);
 		}
 	}
 	fep->dirty_tx = bdp;
@@ -461,9 +562,9 @@
  * effectively tossing the packet.
  */
 static void
-fec_enet_rx(struct net_device *dev)
+fec_enet_rx(struct net_device *ndev)
 {
-	struct	fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
 	struct bufdesc *bdp;
@@ -497,17 +598,17 @@
 		/* Check for errors. */
 		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
 			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
-			dev->stats.rx_errors++;
+			ndev->stats.rx_errors++;
 			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
 				/* Frame too long or too short. */
-				dev->stats.rx_length_errors++;
+				ndev->stats.rx_length_errors++;
 			}
 			if (status & BD_ENET_RX_NO)	/* Frame alignment */
-				dev->stats.rx_frame_errors++;
+				ndev->stats.rx_frame_errors++;
 			if (status & BD_ENET_RX_CR)	/* CRC Error */
-				dev->stats.rx_crc_errors++;
+				ndev->stats.rx_crc_errors++;
 			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
-				dev->stats.rx_fifo_errors++;
+				ndev->stats.rx_fifo_errors++;
 		}
 
 		/* Report late collisions as a frame error.
@@ -515,19 +616,19 @@
 		 * have in the buffer.  So, just drop this frame on the floor.
 		 */
 		if (status & BD_ENET_RX_CL) {
-			dev->stats.rx_errors++;
-			dev->stats.rx_frame_errors++;
+			ndev->stats.rx_errors++;
+			ndev->stats.rx_frame_errors++;
 			goto rx_processing_done;
 		}
 
 		/* Process the incoming frame. */
-		dev->stats.rx_packets++;
+		ndev->stats.rx_packets++;
 		pkt_len = bdp->cbd_datlen;
-		dev->stats.rx_bytes += pkt_len;
+		ndev->stats.rx_bytes += pkt_len;
 		data = (__u8*)__va(bdp->cbd_bufaddr);
 
-	        dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
-        			DMA_FROM_DEVICE);
+		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
 
 		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
 			swap_buffer(data, pkt_len);
@@ -541,18 +642,18 @@
 
 		if (unlikely(!skb)) {
 			printk("%s: Memory squeeze, dropping packet.\n",
-					dev->name);
-			dev->stats.rx_dropped++;
+					ndev->name);
+			ndev->stats.rx_dropped++;
 		} else {
 			skb_reserve(skb, NET_IP_ALIGN);
 			skb_put(skb, pkt_len - 4);	/* Make room */
 			skb_copy_to_linear_data(skb, data, pkt_len - 4);
-			skb->protocol = eth_type_trans(skb, dev);
+			skb->protocol = eth_type_trans(skb, ndev);
 			netif_rx(skb);
 		}
 
-        	bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
-			DMA_FROM_DEVICE);
+		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
+				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
 rx_processing_done:
 		/* Clear the status flags for this buffer */
 		status &= ~BD_ENET_RX_STATS;
@@ -577,10 +678,47 @@
 	spin_unlock(&fep->hw_lock);
 }
 
-/* ------------------------------------------------------------------------- */
-static void __inline__ fec_get_mac(struct net_device *dev)
+static irqreturn_t
+fec_enet_interrupt(int irq, void *dev_id)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct net_device *ndev = dev_id;
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	uint int_events;
+	irqreturn_t ret = IRQ_NONE;
+
+	do {
+		int_events = readl(fep->hwp + FEC_IEVENT);
+		writel(int_events, fep->hwp + FEC_IEVENT);
+
+		if (int_events & FEC_ENET_RXF) {
+			ret = IRQ_HANDLED;
+			fec_enet_rx(ndev);
+		}
+
+		/* Transmit OK, or non-fatal error. Update the buffer
+		 * descriptors. FEC handles all errors, we just discover
+		 * them as part of the transmit process.
+		 */
+		if (int_events & FEC_ENET_TXF) {
+			ret = IRQ_HANDLED;
+			fec_enet_tx(ndev);
+		}
+
+		if (int_events & FEC_ENET_MII) {
+			ret = IRQ_HANDLED;
+			complete(&fep->mdio_done);
+		}
+	} while (int_events);
+
+	return ret;
+}
+
+
+
+/* ------------------------------------------------------------------------- */
+static void __inline__ fec_get_mac(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
 	unsigned char *iap, tmpaddr[ETH_ALEN];
 
@@ -616,11 +754,11 @@
 		iap = &tmpaddr[0];
 	}
 
-	memcpy(dev->dev_addr, iap, ETH_ALEN);
+	memcpy(ndev->dev_addr, iap, ETH_ALEN);
 
 	/* Adjust MAC if using macaddr */
 	if (iap == macaddr)
-		 dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+		 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
 }
 
 /* ------------------------------------------------------------------------- */
@@ -628,9 +766,9 @@
 /*
  * Phy section
  */
-static void fec_enet_adjust_link(struct net_device *dev)
+static void fec_enet_adjust_link(struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct phy_device *phy_dev = fep->phy_dev;
 	unsigned long flags;
 
@@ -647,7 +785,7 @@
 	/* Duplex link change */
 	if (phy_dev->link) {
 		if (fep->full_duplex != phy_dev->duplex) {
-			fec_restart(dev, phy_dev->duplex);
+			fec_restart(ndev, phy_dev->duplex);
 			status_change = 1;
 		}
 	}
@@ -656,9 +794,9 @@
 	if (phy_dev->link != fep->link) {
 		fep->link = phy_dev->link;
 		if (phy_dev->link)
-			fec_restart(dev, phy_dev->duplex);
+			fec_restart(ndev, phy_dev->duplex);
 		else
-			fec_stop(dev);
+			fec_stop(ndev);
 		status_change = 1;
 	}
 
@@ -727,9 +865,9 @@
 	return 0;
 }
 
-static int fec_enet_mii_probe(struct net_device *dev)
+static int fec_enet_mii_probe(struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct phy_device *phy_dev = NULL;
 	char mdio_bus_id[MII_BUS_ID_SIZE];
 	char phy_name[MII_BUS_ID_SIZE + 3];
@@ -754,16 +892,16 @@
 
 	if (phy_id >= PHY_MAX_ADDR) {
 		printk(KERN_INFO "%s: no PHY, assuming direct connection "
-			"to switch\n", dev->name);
+			"to switch\n", ndev->name);
 		strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
 		phy_id = 0;
 	}
 
 	snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
-	phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0,
+	phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
 		PHY_INTERFACE_MODE_MII);
 	if (IS_ERR(phy_dev)) {
-		printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
+		printk(KERN_ERR "%s: could not attach to PHY\n", ndev->name);
 		return PTR_ERR(phy_dev);
 	}
 
@@ -776,7 +914,7 @@
 	fep->full_duplex = 0;
 
 	printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
-		"(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
+		"(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
 		fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
 		fep->phy_dev->irq);
 
@@ -786,8 +924,8 @@
 static int fec_enet_mii_init(struct platform_device *pdev)
 {
 	static struct mii_bus *fec0_mii_bus;
-	struct net_device *dev = platform_get_drvdata(pdev);
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	const struct platform_device_id *id_entry =
 				platform_get_device_id(fep->pdev);
 	int err = -ENXIO, i;
@@ -845,8 +983,6 @@
 	for (i = 0; i < PHY_MAX_ADDR; i++)
 		fep->mii_bus->irq[i] = PHY_POLL;
 
-	platform_set_drvdata(dev, fep->mii_bus);
-
 	if (mdiobus_register(fep->mii_bus))
 		goto err_out_free_mdio_irq;
 
@@ -873,10 +1009,10 @@
 	mdiobus_free(fep->mii_bus);
 }
 
-static int fec_enet_get_settings(struct net_device *dev,
+static int fec_enet_get_settings(struct net_device *ndev,
 				  struct ethtool_cmd *cmd)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct phy_device *phydev = fep->phy_dev;
 
 	if (!phydev)
@@ -885,10 +1021,10 @@
 	return phy_ethtool_gset(phydev, cmd);
 }
 
-static int fec_enet_set_settings(struct net_device *dev,
+static int fec_enet_set_settings(struct net_device *ndev,
 				 struct ethtool_cmd *cmd)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct phy_device *phydev = fep->phy_dev;
 
 	if (!phydev)
@@ -897,14 +1033,14 @@
 	return phy_ethtool_sset(phydev, cmd);
 }
 
-static void fec_enet_get_drvinfo(struct net_device *dev,
+static void fec_enet_get_drvinfo(struct net_device *ndev,
 				 struct ethtool_drvinfo *info)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 
 	strcpy(info->driver, fep->pdev->dev.driver->name);
 	strcpy(info->version, "Revision: 1.0");
-	strcpy(info->bus_info, dev_name(&dev->dev));
+	strcpy(info->bus_info, dev_name(&ndev->dev));
 }
 
 static struct ethtool_ops fec_enet_ethtool_ops = {
@@ -914,12 +1050,12 @@
 	.get_link		= ethtool_op_get_link,
 };
 
-static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct phy_device *phydev = fep->phy_dev;
 
-	if (!netif_running(dev))
+	if (!netif_running(ndev))
 		return -EINVAL;
 
 	if (!phydev)
@@ -928,9 +1064,9 @@
 	return phy_mii_ioctl(phydev, rq, cmd);
 }
 
-static void fec_enet_free_buffers(struct net_device *dev)
+static void fec_enet_free_buffers(struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	int i;
 	struct sk_buff *skb;
 	struct bufdesc	*bdp;
@@ -940,7 +1076,7 @@
 		skb = fep->rx_skbuff[i];
 
 		if (bdp->cbd_bufaddr)
-			dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
+			dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
 					FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
 		if (skb)
 			dev_kfree_skb(skb);
@@ -952,9 +1088,9 @@
 		kfree(fep->tx_bounce[i]);
 }
 
-static int fec_enet_alloc_buffers(struct net_device *dev)
+static int fec_enet_alloc_buffers(struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	int i;
 	struct sk_buff *skb;
 	struct bufdesc	*bdp;
@@ -963,12 +1099,12 @@
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
 		if (!skb) {
-			fec_enet_free_buffers(dev);
+			fec_enet_free_buffers(ndev);
 			return -ENOMEM;
 		}
 		fep->rx_skbuff[i] = skb;
 
-		bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
+		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
 				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
 		bdp->cbd_sc = BD_ENET_RX_EMPTY;
 		bdp++;
@@ -995,45 +1131,47 @@
 }
 
 static int
-fec_enet_open(struct net_device *dev)
+fec_enet_open(struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	int ret;
 
 	/* I should reset the ring buffers here, but I don't yet know
 	 * a simple way to do that.
 	 */
 
-	ret = fec_enet_alloc_buffers(dev);
+	ret = fec_enet_alloc_buffers(ndev);
 	if (ret)
 		return ret;
 
 	/* Probe and connect to PHY when open the interface */
-	ret = fec_enet_mii_probe(dev);
+	ret = fec_enet_mii_probe(ndev);
 	if (ret) {
-		fec_enet_free_buffers(dev);
+		fec_enet_free_buffers(ndev);
 		return ret;
 	}
 	phy_start(fep->phy_dev);
-	netif_start_queue(dev);
+	netif_start_queue(ndev);
 	fep->opened = 1;
 	return 0;
 }
 
 static int
-fec_enet_close(struct net_device *dev)
+fec_enet_close(struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 
 	/* Don't know what to do yet. */
 	fep->opened = 0;
-	netif_stop_queue(dev);
-	fec_stop(dev);
+	netif_stop_queue(ndev);
+	fec_stop(ndev);
 
-	if (fep->phy_dev)
+	if (fep->phy_dev) {
+		phy_stop(fep->phy_dev);
 		phy_disconnect(fep->phy_dev);
+	}
 
-        fec_enet_free_buffers(dev);
+	fec_enet_free_buffers(ndev);
 
 	return 0;
 }
@@ -1051,14 +1189,14 @@
 #define HASH_BITS	6		/* #bits in hash */
 #define CRC32_POLY	0xEDB88320
 
-static void set_multicast_list(struct net_device *dev)
+static void set_multicast_list(struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct netdev_hw_addr *ha;
 	unsigned int i, bit, data, crc, tmp;
 	unsigned char hash;
 
-	if (dev->flags & IFF_PROMISC) {
+	if (ndev->flags & IFF_PROMISC) {
 		tmp = readl(fep->hwp + FEC_R_CNTRL);
 		tmp |= 0x8;
 		writel(tmp, fep->hwp + FEC_R_CNTRL);
@@ -1069,7 +1207,7 @@
 	tmp &= ~0x8;
 	writel(tmp, fep->hwp + FEC_R_CNTRL);
 
-	if (dev->flags & IFF_ALLMULTI) {
+	if (ndev->flags & IFF_ALLMULTI) {
 		/* Catch all multicast addresses, so set the
 		 * filter to all 1's
 		 */
@@ -1084,7 +1222,7 @@
 	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
 	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
 
-	netdev_for_each_mc_addr(ha, dev) {
+	netdev_for_each_mc_addr(ha, ndev) {
 		/* Only support group multicast for now */
 		if (!(ha->addr[0] & 1))
 			continue;
@@ -1092,7 +1230,7 @@
 		/* calculate crc32 value of mac address */
 		crc = 0xffffffff;
 
-		for (i = 0; i < dev->addr_len; i++) {
+		for (i = 0; i < ndev->addr_len; i++) {
 			data = ha->addr[i];
 			for (bit = 0; bit < 8; bit++, data >>= 1) {
 				crc = (crc >> 1) ^
@@ -1119,20 +1257,20 @@
 
 /* Set a MAC change in hardware. */
 static int
-fec_set_mac_address(struct net_device *dev, void *p)
+fec_set_mac_address(struct net_device *ndev, void *p)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct sockaddr *addr = p;
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
 
-	writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
-		(dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
+	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
 		fep->hwp + FEC_ADDR_LOW);
-	writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
+	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
 		fep->hwp + FEC_ADDR_HIGH);
 	return 0;
 }
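[Editorial aside, not part of the patch: the two writel() calls above pack the station address big-endian into the FEC address registers. A hypothetical standalone sketch of the same packing, using a made-up MAC address, to show the expected register layout:]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* example MAC 00:11:22:33:44:55 (made-up) */
	uint8_t a[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* same byte packing as the writel() calls in fec_set_mac_address() */
	uint32_t addr_low  = a[3] | (a[2] << 8) | (a[1] << 16) |
			     ((uint32_t)a[0] << 24);
	uint32_t addr_high = ((uint32_t)a[5] << 16) | ((uint32_t)a[4] << 24);

	/* expect FEC_ADDR_LOW = 0x00112233, FEC_ADDR_HIGH = 0x44550000 */
	printf("LOW=0x%08x HIGH=0x%08x\n", addr_low, addr_high);
	return 0;
}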
@@ -1146,16 +1284,16 @@
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_tx_timeout		= fec_timeout,
 	.ndo_set_mac_address	= fec_set_mac_address,
-	.ndo_do_ioctl           = fec_enet_ioctl,
+	.ndo_do_ioctl		= fec_enet_ioctl,
 };
 
  /*
   * XXX:  We need to clean up on failure exits here.
   *
   */
-static int fec_enet_init(struct net_device *dev)
+static int fec_enet_init(struct net_device *ndev)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct bufdesc *cbd_base;
 	struct bufdesc *bdp;
 	int i;
@@ -1170,20 +1308,19 @@
 
 	spin_lock_init(&fep->hw_lock);
 
-	fep->hwp = (void __iomem *)dev->base_addr;
-	fep->netdev = dev;
+	fep->netdev = ndev;
 
 	/* Get the Ethernet address */
-	fec_get_mac(dev);
+	fec_get_mac(ndev);
 
 	/* Set receive and transmit descriptor base. */
 	fep->rx_bd_base = cbd_base;
 	fep->tx_bd_base = cbd_base + RX_RING_SIZE;
 
 	/* The FEC Ethernet specific entries in the device structure */
-	dev->watchdog_timeo = TX_TIMEOUT;
-	dev->netdev_ops = &fec_netdev_ops;
-	dev->ethtool_ops = &fec_enet_ethtool_ops;
+	ndev->watchdog_timeo = TX_TIMEOUT;
+	ndev->netdev_ops = &fec_netdev_ops;
+	ndev->ethtool_ops = &fec_enet_ethtool_ops;
 
 	/* Initialize the receive buffer descriptors. */
 	bdp = fep->rx_bd_base;
@@ -1212,152 +1349,11 @@
 	bdp--;
 	bdp->cbd_sc |= BD_SC_WRAP;
 
-	fec_restart(dev, 0);
+	fec_restart(ndev, 0);
 
 	return 0;
 }
 
-/* This function is called to start or restart the FEC during a link
- * change.  This only happens when switching between half and full
- * duplex.
- */
-static void
-fec_restart(struct net_device *dev, int duplex)
-{
-	struct fec_enet_private *fep = netdev_priv(dev);
-	const struct platform_device_id *id_entry =
-				platform_get_device_id(fep->pdev);
-	int i;
-	u32 val, temp_mac[2];
-
-	/* Whack a reset.  We should wait for this. */
-	writel(1, fep->hwp + FEC_ECNTRL);
-	udelay(10);
-
-	/*
-	 * enet-mac reset will reset mac address registers too,
-	 * so need to reconfigure it.
-	 */
-	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
-		memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
-		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
-		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
-	}
-
-	/* Clear any outstanding interrupt. */
-	writel(0xffc00000, fep->hwp + FEC_IEVENT);
-
-	/* Reset all multicast.	*/
-	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
-	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
-#ifndef CONFIG_M5272
-	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
-	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
-#endif
-
-	/* Set maximum receive buffer size. */
-	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
-
-	/* Set receive and transmit descriptor base. */
-	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
-	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
-			fep->hwp + FEC_X_DES_START);
-
-	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
-	fep->cur_rx = fep->rx_bd_base;
-
-	/* Reset SKB transmit buffers. */
-	fep->skb_cur = fep->skb_dirty = 0;
-	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
-		if (fep->tx_skbuff[i]) {
-			dev_kfree_skb_any(fep->tx_skbuff[i]);
-			fep->tx_skbuff[i] = NULL;
-		}
-	}
-
-	/* Enable MII mode */
-	if (duplex) {
-		/* MII enable / FD enable */
-		writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
-		writel(0x04, fep->hwp + FEC_X_CNTRL);
-	} else {
-		/* MII enable / No Rcv on Xmit */
-		writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
-		writel(0x0, fep->hwp + FEC_X_CNTRL);
-	}
-	fep->full_duplex = duplex;
-
-	/* Set MII speed */
-	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
-
-	/*
-	 * The phy interface and speed need to get configured
-	 * differently on enet-mac.
-	 */
-	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
-		val = readl(fep->hwp + FEC_R_CNTRL);
-
-		/* MII or RMII */
-		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
-			val |= (1 << 8);
-		else
-			val &= ~(1 << 8);
-
-		/* 10M or 100M */
-		if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
-			val &= ~(1 << 9);
-		else
-			val |= (1 << 9);
-
-		writel(val, fep->hwp + FEC_R_CNTRL);
-	} else {
-#ifdef FEC_MIIGSK_ENR
-		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
-			/* disable the gasket and wait */
-			writel(0, fep->hwp + FEC_MIIGSK_ENR);
-			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
-				udelay(1);
-
-			/*
-			 * configure the gasket:
-			 *   RMII, 50 MHz, no loopback, no echo
-			 */
-			writel(1, fep->hwp + FEC_MIIGSK_CFGR);
-
-			/* re-enable the gasket */
-			writel(2, fep->hwp + FEC_MIIGSK_ENR);
-		}
-#endif
-	}
-
-	/* And last, enable the transmit and receive processing */
-	writel(2, fep->hwp + FEC_ECNTRL);
-	writel(0, fep->hwp + FEC_R_DES_ACTIVE);
-
-	/* Enable interrupts we wish to service */
-	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
-}
-
-static void
-fec_stop(struct net_device *dev)
-{
-	struct fec_enet_private *fep = netdev_priv(dev);
-
-	/* We cannot expect a graceful transmit stop without link !!! */
-	if (fep->link) {
-		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
-		udelay(10);
-		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
-			printk("fec_stop : Graceful transmit stop did not complete !\n");
-	}
-
-	/* Whack a reset.  We should wait for this. */
-	writel(1, fep->hwp + FEC_ECNTRL);
-	udelay(10);
-	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
-	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
-}
-
 static int __devinit
 fec_probe(struct platform_device *pdev)
 {
@@ -1377,19 +1373,20 @@
 
 	/* Init network device */
 	ndev = alloc_etherdev(sizeof(struct fec_enet_private));
-	if (!ndev)
-		return -ENOMEM;
+	if (!ndev) {
+		ret = -ENOMEM;
+		goto failed_alloc_etherdev;
+	}
 
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
 	/* setup board info structure */
 	fep = netdev_priv(ndev);
-	memset(fep, 0, sizeof(*fep));
 
-	ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
+	fep->hwp = ioremap(r->start, resource_size(r));
 	fep->pdev = pdev;
 
-	if (!ndev->base_addr) {
+	if (!fep->hwp) {
 		ret = -ENOMEM;
 		goto failed_ioremap;
 	}
@@ -1407,10 +1404,9 @@
 			break;
 		ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
 		if (ret) {
-			while (i >= 0) {
+			while (--i >= 0) {
 				irq = platform_get_irq(pdev, i);
 				free_irq(irq, ndev);
-				i--;
 			}
 			goto failed_irq;
 		}
@@ -1453,9 +1449,11 @@
 			free_irq(irq, ndev);
 	}
 failed_irq:
-	iounmap((void __iomem *)ndev->base_addr);
+	iounmap(fep->hwp);
 failed_ioremap:
 	free_netdev(ndev);
+failed_alloc_etherdev:
+	release_mem_region(r->start, resource_size(r));
 
 	return ret;
 }
@@ -1465,16 +1463,22 @@
 {
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
-
-	platform_set_drvdata(pdev, NULL);
+	struct resource *r;
 
 	fec_stop(ndev);
 	fec_enet_mii_remove(fep);
 	clk_disable(fep->clk);
 	clk_put(fep->clk);
-	iounmap((void __iomem *)ndev->base_addr);
+	iounmap(fep->hwp);
 	unregister_netdev(ndev);
 	free_netdev(ndev);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	BUG_ON(!r);
+	release_mem_region(r->start, resource_size(r));
+
+	platform_set_drvdata(pdev, NULL);
+
 	return 0;
 }
 
@@ -1483,16 +1487,14 @@
 fec_suspend(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
-	struct fec_enet_private *fep;
+	struct fec_enet_private *fep = netdev_priv(ndev);
 
-	if (ndev) {
-		fep = netdev_priv(ndev);
-		if (netif_running(ndev)) {
-			fec_stop(ndev);
-			netif_device_detach(ndev);
-		}
-		clk_disable(fep->clk);
+	if (netif_running(ndev)) {
+		fec_stop(ndev);
+		netif_device_detach(ndev);
 	}
+	clk_disable(fep->clk);
+
 	return 0;
 }
 
@@ -1500,16 +1502,14 @@
 fec_resume(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
-	struct fec_enet_private *fep;
+	struct fec_enet_private *fep = netdev_priv(ndev);
 
-	if (ndev) {
-		fep = netdev_priv(ndev);
-		clk_enable(fep->clk);
-		if (netif_running(ndev)) {
-			fec_restart(ndev, fep->full_duplex);
-			netif_device_attach(ndev);
-		}
+	clk_enable(fep->clk);
+	if (netif_running(ndev)) {
+		fec_restart(ndev, fep->full_duplex);
+		netif_device_attach(ndev);
 	}
+
 	return 0;
 }
 
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index af09296..9c0b1ba 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5645,6 +5645,8 @@
 		goto out_error;
 	}
 
+	netif_carrier_off(dev);
+
 	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
 		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 119aa20..5ed8f9f 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1920,7 +1920,7 @@
 		if (err) {
 			for (j = 0; j < i; j++)
 				free_grp_irqs(&priv->gfargrp[j]);
-				goto irq_fail;
+			goto irq_fail;
 		}
 	}
 
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ac1d323..8931168 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -400,13 +400,14 @@
 static void *bpq_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct list_head *p;
+	struct bpqdev *bpqdev = v;
 
 	++*pos;
 
 	if (v == SEQ_START_TOKEN)
-		p = rcu_dereference(bpq_devices.next);
+		p = rcu_dereference(list_next_rcu(&bpq_devices));
 	else
-		p = rcu_dereference(((struct bpqdev *)v)->bpq_list.next);
+		p = rcu_dereference(list_next_rcu(&bpqdev->bpq_list));
 
 	return (p == &bpq_devices) ? NULL 
 		: list_entry(p, struct bpqdev, bpq_list);
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 0a2368f..65c1833 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -129,6 +129,7 @@
 		break;
 	case E1000_DEV_ID_82580_COPPER:
 	case E1000_DEV_ID_82580_FIBER:
+	case E1000_DEV_ID_82580_QUAD_FIBER:
 	case E1000_DEV_ID_82580_SERDES:
 	case E1000_DEV_ID_82580_SGMII:
 	case E1000_DEV_ID_82580_COPPER_DUAL:
@@ -237,9 +238,15 @@
 		size = 14;
 	nvm->word_size = 1 << size;
 
-	/* if 82576 then initialize mailbox parameters */
-	if (mac->type == e1000_82576)
+	/* if part supports SR-IOV then initialize mailbox parameters */
+	switch (mac->type) {
+	case e1000_82576:
+	case e1000_i350:
 		igb_init_mbx_params_pf(hw);
+		break;
+	default:
+		break;
+	}
 
 	/* setup PHY parameters */
 	if (phy->media_type != e1000_media_type_copper) {
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index e2638af..281324e 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -54,6 +54,7 @@
 #define E1000_DEV_ID_82580_SERDES             0x1510
 #define E1000_DEV_ID_82580_SGMII              0x1511
 #define E1000_DEV_ID_82580_COPPER_DUAL        0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER         0x1527
 #define E1000_DEV_ID_DH89XXCC_SGMII           0x0438
 #define E1000_DEV_ID_DH89XXCC_SERDES          0x043A
 #define E1000_DEV_ID_DH89XXCC_BACKPLANE       0x043C
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
index c474cdb..78d48c7 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/igb/e1000_mbx.c
@@ -422,26 +422,24 @@
 {
 	struct e1000_mbx_info *mbx = &hw->mbx;
 
-	if (hw->mac.type == e1000_82576) {
-		mbx->timeout = 0;
-		mbx->usec_delay = 0;
+	mbx->timeout = 0;
+	mbx->usec_delay = 0;
 
-		mbx->size = E1000_VFMAILBOX_SIZE;
+	mbx->size = E1000_VFMAILBOX_SIZE;
 
-		mbx->ops.read = igb_read_mbx_pf;
-		mbx->ops.write = igb_write_mbx_pf;
-		mbx->ops.read_posted = igb_read_posted_mbx;
-		mbx->ops.write_posted = igb_write_posted_mbx;
-		mbx->ops.check_for_msg = igb_check_for_msg_pf;
-		mbx->ops.check_for_ack = igb_check_for_ack_pf;
-		mbx->ops.check_for_rst = igb_check_for_rst_pf;
+	mbx->ops.read = igb_read_mbx_pf;
+	mbx->ops.write = igb_write_mbx_pf;
+	mbx->ops.read_posted = igb_read_posted_mbx;
+	mbx->ops.write_posted = igb_write_posted_mbx;
+	mbx->ops.check_for_msg = igb_check_for_msg_pf;
+	mbx->ops.check_for_ack = igb_check_for_ack_pf;
+	mbx->ops.check_for_rst = igb_check_for_rst_pf;
 
-		mbx->stats.msgs_tx = 0;
-		mbx->stats.msgs_rx = 0;
-		mbx->stats.reqs = 0;
-		mbx->stats.acks = 0;
-		mbx->stats.rsts = 0;
-	}
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
 
 	return 0;
 }
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 58c665b..cb6bf7b 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -68,6 +68,7 @@
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
@@ -2286,9 +2287,14 @@
 
 	spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
-	if (hw->mac.type == e1000_82576)
+	switch (hw->mac.type) {
+	case e1000_82576:
+	case e1000_i350:
 		adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
-
+		break;
+	default:
+		break;
+	}
 #endif /* CONFIG_PCI_IOV */
 	adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
 
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 9e3f4f5..4488bd5 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -635,7 +635,7 @@
 
 	ret = sh_irda_set_baudrate(self, speed);
 	if (ret < 0)
-		return ret;
+		goto sh_irda_hard_xmit_end;
 
 	self->tx_buff.len = 0;
 	if (skb->len) {
@@ -652,11 +652,21 @@
 
 		sh_irda_write(self, IRTFLR, self->tx_buff.len);
 		sh_irda_write(self, IRTCTR, ARMOD | TE);
-	}
+	} else
+		goto sh_irda_hard_xmit_end;
 
 	dev_kfree_skb(skb);
 
 	return 0;
+
+sh_irda_hard_xmit_end:
+	sh_irda_set_baudrate(self, 9600);
+	netif_wake_queue(self->ndev);
+	sh_irda_rcv_ctrl(self, 1);
+	dev_kfree_skb(skb);
+
+	return ret;
+
 }
 
 static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 3b8c924..12769b5 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -334,6 +334,10 @@
 	u16 bd_number;
 	struct work_struct reset_task;
 	struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+
+	/* DCB parameters */
+	struct ieee_pfc *ixgbe_ieee_pfc;
+	struct ieee_ets *ixgbe_ieee_ets;
 	struct ixgbe_dcb_config dcb_cfg;
 	struct ixgbe_dcb_config temp_dcb_cfg;
 	u8 dcb_set_bitmap;
@@ -521,7 +525,6 @@
 extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
 extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
 extern int ethtool_ioctl(struct ifreq *ifr);
-extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
 extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index d5ede2d..ebbda7d 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1370,6 +1370,9 @@
 		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
 
 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+		/*  clear VMDq pool/queue selection for RAR 0 */
+		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
 	}
 	hw->addr_ctrl.overflow_promisc = 0;
 
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 66ed045..90cceb4 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -29,6 +29,7 @@
 #define _IXGBE_COMMON_H_
 
 #include "ixgbe_type.h"
+#include "ixgbe.h"
 
 u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
@@ -110,9 +111,8 @@
 
 #define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
 
-extern struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw);
 #define hw_dbg(hw, format, arg...) \
-	netdev_dbg(ixgbe_get_hw_dev(hw), format, ##arg)
+	netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
 #define e_dev_info(format, arg...) \
 	dev_info(&adapter->pdev->dev, format, ## arg)
 #define e_dev_warn(format, arg...) \
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index d16c260..13c962e 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -34,6 +34,42 @@
 #include "ixgbe_dcb_82599.h"
 
 /**
+ * ixgbe_ieee_credits - This calculates the ieee traffic class
+ * credits from the configured bandwidth percentages. Credits
+ * are the smallest unit programmable into the underlying
+ * hardware. The IEEE 802.1Qaz specification does not use bandwidth
+ * groups, so this is much simpler than the CEE case.
+ */
+s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame)
+{
+	int min_percent = 100;
+	int min_credit, multiplier;
+	int i;
+
+	min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+			DCB_CREDIT_QUANTUM;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		if (bw[i] < min_percent && bw[i])
+			min_percent = bw[i];
+	}
+
+	multiplier = (min_credit / min_percent) + 1;
+
+	/* Find out the hw credits for each TC */
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL);
+
+		if (val < min_credit)
+			val = min_credit;
+		refill[i] = val;
+
+		max[i] = (bw[i] * MAX_CREDIT)/100;
+	}
+	return 0;
+}
+
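[Editorial aside, not part of the patch: the refill/max credit math above can be sanity-checked with a small standalone sketch. The bandwidth percentages and max_frame below are made-up example inputs, and DCB_CREDIT_QUANTUM, MAX_CREDIT_REFILL and MAX_CREDIT are assumed values standing in for the driver's own definitions.]

#include <stdio.h>

#define MAX_TRAFFIC_CLASS	8
#define DCB_CREDIT_QUANTUM	64	/* assumed: 64-byte credit quantum */
#define MAX_CREDIT_REFILL	511	/* assumed register limit */
#define MAX_CREDIT		4095	/* assumed register limit */

int main(void)
{
	/* example per-TC bandwidth shares (percent), made up for illustration */
	unsigned char bw[MAX_TRAFFIC_CLASS] = { 50, 30, 10, 10, 0, 0, 0, 0 };
	unsigned short refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
	int max_frame = 1518, min_percent = 100, i;

	/* smallest non-zero bandwidth share determines the multiplier */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
		if (bw[i] && bw[i] < min_percent)
			min_percent = bw[i];

	int min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
			 DCB_CREDIT_QUANTUM;
	int multiplier = (min_credit / min_percent) + 1;

	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		/* clamp refill between min_credit and MAX_CREDIT_REFILL */
		int val = bw[i] * multiplier;
		if (val > MAX_CREDIT_REFILL)
			val = MAX_CREDIT_REFILL;
		if (val < min_credit)
			val = min_credit;
		refill[i] = val;
		max[i] = (bw[i] * MAX_CREDIT) / 100;
		printf("TC%d: refill=%d max=%d\n", i, refill[i], max[i]);
	}
	return 0;
}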
+/**
  * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits
  * @ixgbe_dcb_config: Struct containing DCB settings.
  * @direction: Configuring either Tx or Rx.
@@ -141,6 +177,59 @@
 	return ret_val;
 }
 
+void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
+{
+	int i;
+
+	*pfc_en = 0;
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+		*pfc_en |= (cfg->tc_config[i].dcb_pfc & 0xF) << i;
+}
+
+void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
+			     u16 *refill)
+{
+	struct tc_bw_alloc *p;
+	int i;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		p = &cfg->tc_config[i].path[direction];
+		refill[i] = p->data_credits_refill;
+	}
+}
+
+void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
+{
+	int i;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+		max[i] = cfg->tc_config[i].desc_credits_max;
+}
+
+void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
+			    u8 *bwgid)
+{
+	struct tc_bw_alloc *p;
+	int i;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		p = &cfg->tc_config[i].path[direction];
+		bwgid[i] = p->bwg_id;
+	}
+}
+
+void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
+			    u8 *ptype)
+{
+	struct tc_bw_alloc *p;
+	int i;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		p = &cfg->tc_config[i].path[direction];
+		ptype[i] = p->prio_type;
+	}
+}
+
 /**
  * ixgbe_dcb_hw_config - Config and enable DCB
  * @hw: pointer to hardware structure
@@ -152,13 +241,30 @@
                         struct ixgbe_dcb_config *dcb_config)
 {
 	s32 ret = 0;
+	u8 pfc_en;
+	u8 ptype[MAX_TRAFFIC_CLASS];
+	u8 bwgid[MAX_TRAFFIC_CLASS];
+	u16 refill[MAX_TRAFFIC_CLASS];
+	u16 max[MAX_TRAFFIC_CLASS];
+
+	/* Unpack CEE standard containers */
+	ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en);
+	ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill);
+	ixgbe_dcb_unpack_max(dcb_config, max);
+	ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid);
+	ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype);
+
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
-		ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
+		ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->rx_pba_cfg,
+						pfc_en, refill, max, bwgid,
+						ptype);
 		break;
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
-		ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
+		ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->rx_pba_cfg,
+						pfc_en, refill, max, bwgid,
+						ptype);
 		break;
 	default:
 		break;
@@ -166,3 +272,70 @@
 	return ret;
 }
 
+/* Helper routines to abstract HW specifics from DCB netlink ops */
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en)
+{
+	int ret = -EINVAL;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+			    u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa)
+{
+	int i;
+	u8 prio_type[IEEE_8021QAZ_MAX_TCS];
+
+	/* Map TSA onto CEE prio type */
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		switch (tsa[i]) {
+		case IEEE_8021QAZ_TSA_STRICT:
+			prio_type[i] = 2;
+			break;
+		case IEEE_8021QAZ_TSA_ETS:
+			prio_type[i] = 0;
+			break;
+		default:
+			/* Hardware only supports the strict priority and
+			 * ETS transmission selection algorithms; if we
+			 * receive any other value from dcbnl, throw an
+			 * error.
+			 */
+			return -EINVAL;
+		}
+	}
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max,
+							prio_type);
+		ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+							     bwg_id, prio_type);
+		ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+							     bwg_id, prio_type);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
+						  bwg_id, prio_type);
+		ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+						       bwg_id, prio_type);
+		ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+						       bwg_id, prio_type);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 1cfe38e..e593511 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -139,7 +139,6 @@
 	struct tc_configuration tc_config[MAX_TRAFFIC_CLASS];
 	u8     bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
 	bool   pfc_mode_enable;
-	bool   round_robin_enable;
 
 	enum dcb_rx_pba_cfg rx_pba_cfg;
 
@@ -148,12 +147,21 @@
 };
 
 /* DCB driver APIs */
+void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en);
+void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *);
+void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
+void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
+void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
 
 /* DCB credits calculation */
+s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame);
 s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
 				   struct ixgbe_dcb_config *, int, u8);
 
 /* DCB hw initialization */
+s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
+			    u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type);
+s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en);
 s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
 
 /* DCB definitions for credit calculation */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 9a5e89c..2965edc 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -38,15 +38,14 @@
  *
  * Configure packet buffers for DCB mode.
  */
-static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw,
-						 struct ixgbe_dcb_config *dcb_config)
+static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, u8 rx_pba)
 {
 	s32 ret_val = 0;
 	u32 value = IXGBE_RXPBSIZE_64KB;
 	u8  i = 0;
 
 	/* Setup Rx packet buffer sizes */
-	switch (dcb_config->rx_pba_cfg) {
+	switch (rx_pba) {
 	case pba_80_48:
 		/* Setup the first four at 80KB */
 		value = IXGBE_RXPBSIZE_80KB;
@@ -78,10 +77,11 @@
  *
  * Configure Rx Data Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
-                                      struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
+					u16 *refill,
+					u16 *max,
+					u8 *prio_type)
 {
-	struct tc_bw_alloc    *p;
 	u32    reg           = 0;
 	u32    credit_refill = 0;
 	u32    credit_max    = 0;
@@ -102,13 +102,12 @@
 
 	/* Configure traffic class credits and priority */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
-		credit_refill = p->data_credits_refill;
-		credit_max    = p->data_credits_max;
+		credit_refill = refill[i];
+		credit_max    = max[i];
 
 		reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT);
 
-		if (p->prio_type == prio_link)
+		if (prio_type[i] == prio_link)
 			reg |= IXGBE_RT2CR_LSP;
 
 		IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg);
@@ -135,10 +134,12 @@
  *
  * Configure Tx Descriptor Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
-                                           struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+						u16 *refill,
+						u16 *max,
+						u8 *bwg_id,
+						u8 *prio_type)
 {
-	struct tc_bw_alloc *p;
 	u32    reg, max_credits;
 	u8     i;
 
@@ -146,10 +147,8 @@
 
 	/* Enable arbiter */
 	reg &= ~IXGBE_DPMCS_ARBDIS;
-	if (!(dcb_config->round_robin_enable)) {
-		/* Enable DFP and Recycle mode */
-		reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
-	}
+	/* Enable DFP and Recycle mode */
+	reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
 	reg |= IXGBE_DPMCS_TSOEF;
 	/* Configure Max TSO packet size 34KB including payload and headers */
 	reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
@@ -158,16 +157,15 @@
 
 	/* Configure traffic class credits and priority */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-		max_credits = dcb_config->tc_config[i].desc_credits_max;
+		max_credits = max[i];
 		reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT;
-		reg |= p->data_credits_refill;
-		reg |= (u32)(p->bwg_id) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
+		reg |= refill[i];
+		reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT;
 
-		if (p->prio_type == prio_group)
+		if (prio_type[i] == prio_group)
 			reg |= IXGBE_TDTQ2TCCR_GSP;
 
-		if (p->prio_type == prio_link)
+		if (prio_type[i] == prio_link)
 			reg |= IXGBE_TDTQ2TCCR_LSP;
 
 		IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg);
@@ -183,10 +181,12 @@
  *
  * Configure Tx Data Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
-                                           struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+						u16 *refill,
+						u16 *max,
+						u8 *bwg_id,
+						u8 *prio_type)
 {
-	struct tc_bw_alloc *p;
 	u32 reg;
 	u8 i;
 
@@ -200,15 +200,14 @@
 
 	/* Configure traffic class credits and priority */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-		reg = p->data_credits_refill;
-		reg |= (u32)(p->data_credits_max) << IXGBE_TDPT2TCCR_MCL_SHIFT;
-		reg |= (u32)(p->bwg_id) << IXGBE_TDPT2TCCR_BWG_SHIFT;
+		reg = refill[i];
+		reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT;
+		reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT;
 
-		if (p->prio_type == prio_group)
+		if (prio_type[i] == prio_group)
 			reg |= IXGBE_TDPT2TCCR_GSP;
 
-		if (p->prio_type == prio_link)
+		if (prio_type[i] == prio_link)
 			reg |= IXGBE_TDPT2TCCR_LSP;
 
 		IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg);
@@ -229,13 +228,12 @@
  *
  * Configure Priority Flow Control for each traffic class.
  */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
-                               struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 {
 	u32 reg, rx_pba_size;
 	u8  i;
 
-	if (!dcb_config->pfc_mode_enable)
+	if (!pfc_en)
 		goto out;
 
 	/* Enable Transmit Priority Flow Control */
@@ -256,19 +254,20 @@
 	 * for each traffic class.
 	 */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		int enabled = pfc_en & (1 << i);
 		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
 		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
 		reg = (rx_pba_size - hw->fc.low_water) << 10;
 
-		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
-		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
+		if (enabled == pfc_enabled_tx ||
+		    enabled == pfc_enabled_full)
 			reg |= IXGBE_FCRTL_XONE;
 
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
 
 		reg = (rx_pba_size - hw->fc.high_water) << 10;
-		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
-		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
+		if (enabled == pfc_enabled_tx ||
+		    enabled == pfc_enabled_full)
 			reg |= IXGBE_FCRTH_FCEN;
 
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
@@ -292,7 +291,7 @@
  * Configure queue statistics registers, all queues belonging to same traffic
  * class uses a single set of queue statistics counters.
  */
-static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
 {
 	u32 reg = 0;
 	u8  i   = 0;
@@ -325,13 +324,16 @@
  * Configure dcb settings and enable dcb mode.
  */
 s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
-                              struct ixgbe_dcb_config *dcb_config)
+			      u8 rx_pba, u8 pfc_en, u16 *refill,
+			      u16 *max, u8 *bwg_id, u8 *prio_type)
 {
-	ixgbe_dcb_config_packet_buffers_82598(hw, dcb_config);
-	ixgbe_dcb_config_rx_arbiter_82598(hw, dcb_config);
-	ixgbe_dcb_config_tx_desc_arbiter_82598(hw, dcb_config);
-	ixgbe_dcb_config_tx_data_arbiter_82598(hw, dcb_config);
-	ixgbe_dcb_config_pfc_82598(hw, dcb_config);
+	ixgbe_dcb_config_packet_buffers_82598(hw, rx_pba);
+	ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
+	ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
+					       bwg_id, prio_type);
+	ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max,
+					       bwg_id, prio_type);
+	ixgbe_dcb_config_pfc_82598(hw, pfc_en);
 	ixgbe_dcb_config_tc_stats_82598(hw);
 
 	return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index abc03cc..0d2a758 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -71,9 +71,28 @@
 /* DCB hardware-specific driver APIs */
 
 /* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en);
 
 /* DCB hw initialization */
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, struct ixgbe_dcb_config *);
+s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw,
+					u16 *refill,
+					u16 *max,
+					u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
+						u16 *refill,
+						u16 *max,
+						u8 *bwg_id,
+						u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
+						u16 *refill,
+						u16 *max,
+						u8 *bwg_id,
+						u8 *prio_type);
+
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
+			      u8 rx_pba, u8 pfc_en, u16 *refill,
+			      u16 *max, u8 *bwg_id, u8 *prio_type);
 
 #endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 374e1f7..b0d97a9 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -33,19 +33,18 @@
 /**
  * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @rx_pba: method to distribute packet buffer
  *
  * Configure packet buffers for DCB mode.
  */
-static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw,
-                                          struct ixgbe_dcb_config *dcb_config)
+static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
 {
 	s32 ret_val = 0;
 	u32 value = IXGBE_RXPBSIZE_64KB;
 	u8  i = 0;
 
 	/* Setup Rx packet buffer sizes */
-	switch (dcb_config->rx_pba_cfg) {
+	switch (rx_pba) {
 	case pba_80_48:
 		/* Setup the first four at 80KB */
 		value = IXGBE_RXPBSIZE_80KB;
@@ -75,14 +74,19 @@
 /**
  * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
  *
  * Configure Rx Packet Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
-                                      struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+				      u16 *refill,
+				      u16 *max,
+				      u8 *bwg_id,
+				      u8 *prio_type)
 {
-	struct tc_bw_alloc    *p;
 	u32    reg           = 0;
 	u32    credit_refill = 0;
 	u32    credit_max    = 0;
@@ -103,15 +107,13 @@
 
 	/* Configure traffic class credits and priority */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		p = &dcb_config->tc_config[i].path[DCB_RX_CONFIG];
-
-		credit_refill = p->data_credits_refill;
-		credit_max    = p->data_credits_max;
+		credit_refill = refill[i];
+		credit_max    = max[i];
 		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);
 
-		reg |= (u32)(p->bwg_id) << IXGBE_RTRPT4C_BWG_SHIFT;
+		reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;
 
-		if (p->prio_type == prio_link)
+		if (prio_type[i] == prio_link)
 			reg |= IXGBE_RTRPT4C_LSP;
 
 		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
@@ -130,14 +132,19 @@
 /**
  * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
  *
  * Configure Tx Descriptor Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
-                                           struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+					   u16 *refill,
+					   u16 *max,
+					   u8 *bwg_id,
+					   u8 *prio_type)
 {
-	struct tc_bw_alloc *p;
 	u32    reg, max_credits;
 	u8     i;
 
@@ -149,16 +156,15 @@
 
 	/* Configure traffic class credits and priority */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-		max_credits = dcb_config->tc_config[i].desc_credits_max;
+		max_credits = max[i];
 		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
-		reg |= p->data_credits_refill;
-		reg |= (u32)(p->bwg_id) << IXGBE_RTTDT2C_BWG_SHIFT;
+		reg |= refill[i];
+		reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;
 
-		if (p->prio_type == prio_group)
+		if (prio_type[i] == prio_group)
 			reg |= IXGBE_RTTDT2C_GSP;
 
-		if (p->prio_type == prio_link)
+		if (prio_type[i] == prio_link)
 			reg |= IXGBE_RTTDT2C_LSP;
 
 		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
@@ -177,14 +183,19 @@
 /**
  * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
  *
  * Configure Tx Packet Arbiter and credits for each traffic class.
  */
-static s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
-                                           struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+					   u16 *refill,
+					   u16 *max,
+					   u8 *bwg_id,
+					   u8 *prio_type)
 {
-	struct tc_bw_alloc *p;
 	u32 reg;
 	u8 i;
 
@@ -205,15 +216,14 @@
 
 	/* Configure traffic class credits and priority */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		p = &dcb_config->tc_config[i].path[DCB_TX_CONFIG];
-		reg = p->data_credits_refill;
-		reg |= (u32)(p->data_credits_max) << IXGBE_RTTPT2C_MCL_SHIFT;
-		reg |= (u32)(p->bwg_id) << IXGBE_RTTPT2C_BWG_SHIFT;
+		reg = refill[i];
+		reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
+		reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;
 
-		if (p->prio_type == prio_group)
+		if (prio_type[i] == prio_group)
 			reg |= IXGBE_RTTPT2C_GSP;
 
-		if (p->prio_type == prio_link)
+		if (prio_type[i] == prio_link)
 			reg |= IXGBE_RTTPT2C_LSP;
 
 		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
@@ -233,17 +243,16 @@
 /**
  * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @pfc_en: enabled pfc bitmask
  *
  * Configure Priority Flow Control (PFC) for each traffic class.
  */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
-                               struct ixgbe_dcb_config *dcb_config)
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
 {
 	u32 i, reg, rx_pba_size;
 
 	/* If PFC is disabled globally then fall back to LFC. */
-	if (!dcb_config->pfc_mode_enable) {
+	if (!pfc_en) {
 		for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
 			hw->mac.ops.fc_enable(hw, i);
 		goto out;
@@ -251,19 +260,18 @@
 
 	/* Configure PFC Tx thresholds per TC */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		int enabled = pfc_en & (1 << i);
 		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
 		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
 
 		reg = (rx_pba_size - hw->fc.low_water) << 10;
 
-		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
-		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
+		if (enabled)
 			reg |= IXGBE_FCRTL_XONE;
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
 
 		reg = (rx_pba_size - hw->fc.high_water) << 10;
-		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
-		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
+		if (enabled)
 			reg |= IXGBE_FCRTH_FCEN;
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
 	}
@@ -349,7 +357,6 @@
 /**
  * ixgbe_dcb_config_82599 - Configure general DCB parameters
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
  *
  * Configure general DCB parameters.
  */
@@ -406,19 +413,27 @@
 /**
  * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
  * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
+ * @rx_pba: method to distribute packet buffer
+ * @refill: refill credits indexed by traffic class
+ * @max: max credits indexed by traffic class
+ * @bwg_id: bandwidth grouping indexed by traffic class
+ * @prio_type: priority type indexed by traffic class
+ * @pfc_en: enabled pfc bitmask
  *
  * Configure dcb settings and enable dcb mode.
  */
 s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
-                              struct ixgbe_dcb_config *dcb_config)
+			      u8 rx_pba, u8 pfc_en, u16 *refill,
+			      u16 *max, u8 *bwg_id, u8 *prio_type)
 {
-	ixgbe_dcb_config_packet_buffers_82599(hw, dcb_config);
+	ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
 	ixgbe_dcb_config_82599(hw);
-	ixgbe_dcb_config_rx_arbiter_82599(hw, dcb_config);
-	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, dcb_config);
-	ixgbe_dcb_config_tx_data_arbiter_82599(hw, dcb_config);
-	ixgbe_dcb_config_pfc_82599(hw, dcb_config);
+	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type);
+	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
+					       bwg_id, prio_type);
+	ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
+					       bwg_id, prio_type);
+	ixgbe_dcb_config_pfc_82599(hw, pfc_en);
 	ixgbe_dcb_config_tc_stats_82599(hw);
 
 	return 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 3841649..5b0ca85 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -102,11 +102,29 @@
 /* DCB hardware-specific driver APIs */
 
 /* DCB PFC functions */
-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
-                               struct ixgbe_dcb_config *dcb_config);
+s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en);
 
 /* DCB hw initialization */
+s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
+					u16 *refill,
+					u16 *max,
+					u8 *bwg_id,
+					u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
+						u16 *refill,
+						u16 *max,
+						u8 *bwg_id,
+						u8 *prio_type);
+
+s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
+						u16 *refill,
+						u16 *max,
+						u8 *bwg_id,
+						u8 *prio_type);
+
 s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
-                              struct ixgbe_dcb_config *config);
+			      u8 rx_pba, u8 pfc_en, u16 *refill,
+			      u16 *max, u8 *bwg_id, u8 *prio_type);
 
 #endif /* _DCB_82599_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index bf566e8..a977df3 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -37,7 +37,6 @@
 #define BIT_PG_RX	0x04
 #define BIT_PG_TX	0x08
 #define BIT_APP_UPCHG	0x10
-#define BIT_RESETLINK   0x40
 #define BIT_LINKSPEED   0x80
 
 /* Responses for the DCB_C_SET_ALL command */
@@ -225,10 +224,8 @@
 	    (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent !=
 	     adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) ||
 	    (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
-	     adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) {
+	     adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap))
 		adapter->dcb_set_bitmap |= BIT_PG_TX;
-		adapter->dcb_set_bitmap |= BIT_RESETLINK;
-	}
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -239,10 +236,8 @@
 	adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct;
 
 	if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] !=
-	    adapter->dcb_cfg.bw_percentage[0][bwg_id]) {
+	    adapter->dcb_cfg.bw_percentage[0][bwg_id])
 		adapter->dcb_set_bitmap |= BIT_PG_TX;
-		adapter->dcb_set_bitmap |= BIT_RESETLINK;
-	}
 }
 
 static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
@@ -269,10 +264,8 @@
 	    (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent !=
 	     adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) ||
 	    (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap !=
-	     adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) {
+	     adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap))
 		adapter->dcb_set_bitmap |= BIT_PG_RX;
-		adapter->dcb_set_bitmap |= BIT_RESETLINK;
-	}
 }
 
 static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
@@ -283,10 +276,8 @@
 	adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct;
 
 	if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] !=
-	    adapter->dcb_cfg.bw_percentage[1][bwg_id]) {
+	    adapter->dcb_cfg.bw_percentage[1][bwg_id])
 		adapter->dcb_set_bitmap |= BIT_PG_RX;
-		adapter->dcb_set_bitmap |= BIT_RESETLINK;
-	}
 }
 
 static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
@@ -365,21 +356,17 @@
 		return DCB_NO_HW_CHG;
 
 	/*
-	 * Only take down the adapter if the configuration change
-	 * requires a reset.
+	 * Only take down the adapter if an app change occurred. FCoE
+	 * may shuffle tx rings in this case and this cannot currently
+	 * be done without a reset.
 	 */
-	if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
+	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
 		while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
 			msleep(1);
 
-		if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
-			if (netif_running(netdev))
-				netdev->netdev_ops->ndo_stop(netdev);
-			ixgbe_clear_interrupt_scheme(adapter);
-		} else {
-			if (netif_running(netdev))
-				ixgbe_down(adapter);
-		}
+		if (netif_running(netdev))
+			netdev->netdev_ops->ndo_stop(netdev);
+		ixgbe_clear_interrupt_scheme(adapter);
 	}
 
 	if (adapter->dcb_cfg.pfc_mode_enable) {
@@ -408,29 +395,51 @@
 		}
 	}
 
-	if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
-		if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
-			ixgbe_init_interrupt_scheme(adapter);
-			if (netif_running(netdev))
-				netdev->netdev_ops->ndo_open(netdev);
-		} else {
-			if (netif_running(netdev))
-				ixgbe_up(adapter);
-		}
+	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
+		ixgbe_init_interrupt_scheme(adapter);
+		if (netif_running(netdev))
+			netdev->netdev_ops->ndo_open(netdev);
 		ret = DCB_HW_CHG_RST;
-	} else if (adapter->dcb_set_bitmap & BIT_PFC) {
-		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
-			ixgbe_dcb_config_pfc_82598(&adapter->hw,
-			                           &adapter->dcb_cfg);
-		else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-			ixgbe_dcb_config_pfc_82599(&adapter->hw,
-			                           &adapter->dcb_cfg);
+	}
+
+	if (adapter->dcb_set_bitmap & BIT_PFC) {
+		u8 pfc_en;
+		ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en);
+		ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en);
 		ret = DCB_HW_CHG;
 	}
+
+	if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
+		u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
+		u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
+		int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+#ifdef CONFIG_FCOE
+		if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+			max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+
+		ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
+					       max_frame, DCB_TX_CONFIG);
+		ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg,
+					       max_frame, DCB_RX_CONFIG);
+
+		ixgbe_dcb_unpack_refill(&adapter->dcb_cfg,
+					DCB_TX_CONFIG, refill);
+		ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max);
+		ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg,
+				       DCB_TX_CONFIG, bwg_id);
+		ixgbe_dcb_unpack_prio(&adapter->dcb_cfg,
+				      DCB_TX_CONFIG, prio_type);
+
+		ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
+					bwg_id, prio_type);
+	}
+
 	if (adapter->dcb_cfg.pfc_mode_enable)
 		adapter->hw.fc.current_mode = ixgbe_fc_pfc;
 
-	if (adapter->dcb_set_bitmap & BIT_RESETLINK)
+	if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
 		clear_bit(__IXGBE_RESETTING, &adapter->state);
 	adapter->dcb_set_bitmap = 0x00;
 	return ret;
@@ -568,18 +577,29 @@
 	case DCB_APP_IDTYPE_ETHTYPE:
 #ifdef IXGBE_FCOE
 		if (id == ETH_P_FCOE) {
-			u8 tc;
-			struct ixgbe_adapter *adapter;
+			u8 old_tc;
+			struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-			adapter = netdev_priv(netdev);
-			tc = adapter->fcoe.tc;
+			/* Get the currently programmed tc */
+			old_tc = adapter->fcoe.tc;
 			rval = ixgbe_fcoe_setapp(adapter, up);
-			if ((!rval) && (tc != adapter->fcoe.tc) &&
-			    (adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
-			    (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+
+			if (rval ||
+			   !(adapter->flags & IXGBE_FLAG_DCB_ENABLED) ||
+			   !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+				break;
+
+			/* The FCoE application priority may be changed multiple
+			 * times in quick succession with switches that build up
+			 * TLVs. To avoid creating unneeded device resets this
+			 * checks the actual HW configuration and clears
+			 * BIT_APP_UPCHG if a HW configuration change is not
+			 * needed.
+			 */
+			if (old_tc == adapter->fcoe.tc)
+				adapter->dcb_set_bitmap &= ~BIT_APP_UPCHG;
+			else
 				adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
-				adapter->dcb_set_bitmap |= BIT_RESETLINK;
-			}
 		}
 #endif
 		break;
@@ -591,7 +611,98 @@
 	return rval;
 }
 
+static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
+				   struct ieee_ets *ets)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets;
+
+	/* No IEEE ETS settings available */
+	if (!my_ets)
+		return -EINVAL;
+
+	ets->ets_cap = MAX_TRAFFIC_CLASS;
+	ets->cbs = my_ets->cbs;
+	memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+	memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
+	memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+	memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+	return 0;
+}
+
+static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
+				   struct ieee_ets *ets)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	__u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS];
+	int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	int err;
+	/* naively give each TC a bwg to map onto CEE hardware */
+	__u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+	if (!adapter->ixgbe_ieee_ets) {
+		adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets),
+						  GFP_KERNEL);
+		if (!adapter->ixgbe_ieee_ets)
+			return -ENOMEM;
+	}
+
+
+	memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets));
+
+	ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame);
+	err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max,
+				      bwg_id, ets->tc_tsa);
+	return err;
+}
+
+static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
+				   struct ieee_pfc *pfc)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc;
+	int i;
+
+	/* No IEEE PFC settings available */
+	if (!my_pfc)
+		return -EINVAL;
+
+	pfc->pfc_cap = MAX_TRAFFIC_CLASS;
+	pfc->pfc_en = my_pfc->pfc_en;
+	pfc->mbc = my_pfc->mbc;
+	pfc->delay = my_pfc->delay;
+
+	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+		pfc->requests[i] = adapter->stats.pxoffrxc[i];
+		pfc->indications[i] = adapter->stats.pxofftxc[i];
+	}
+
+	return 0;
+}
+
+static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
+				   struct ieee_pfc *pfc)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	int err;
+
+	if (!adapter->ixgbe_ieee_pfc) {
+		adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc),
+						  GFP_KERNEL);
+		if (!adapter->ixgbe_ieee_pfc)
+			return -ENOMEM;
+	}
+
+	memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
+	err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en);
+	return err;
+}
+
 const struct dcbnl_rtnl_ops dcbnl_ops = {
+	.ieee_getets	= ixgbe_dcbnl_ieee_getets,
+	.ieee_setets	= ixgbe_dcbnl_ieee_setets,
+	.ieee_getpfc	= ixgbe_dcbnl_ieee_getpfc,
+	.ieee_setpfc	= ixgbe_dcbnl_ieee_setpfc,
 	.getstate	= ixgbe_dcbnl_get_state,
 	.setstate	= ixgbe_dcbnl_set_state,
 	.getpermhwaddr	= ixgbe_dcbnl_get_perm_hw_addr,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 2002ea8..309272f 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -152,7 +152,17 @@
 		ecmd->supported |= (SUPPORTED_1000baseT_Full |
 		                    SUPPORTED_Autoneg);
 
+		switch (hw->mac.type) {
+		case ixgbe_mac_X540:
+			ecmd->supported |= SUPPORTED_100baseT_Full;
+			break;
+		default:
+			break;
+		}
+
 		ecmd->advertising = ADVERTISED_Autoneg;
+		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+			ecmd->advertising |= ADVERTISED_100baseT_Full;
 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
 			ecmd->advertising |= ADVERTISED_10000baseT_Full;
 		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
@@ -167,6 +177,15 @@
 			ecmd->advertising |= (ADVERTISED_10000baseT_Full |
 					      ADVERTISED_1000baseT_Full);
 
+		switch (hw->mac.type) {
+		case ixgbe_mac_X540:
+			if (!(ecmd->advertising & ADVERTISED_100baseT_Full))
+				ecmd->advertising |= (ADVERTISED_100baseT_Full);
+			break;
+		default:
+			break;
+		}
+
 		if (hw->phy.media_type == ixgbe_media_type_copper) {
 			ecmd->supported |= SUPPORTED_TP;
 			ecmd->advertising |= ADVERTISED_TP;
@@ -271,8 +290,19 @@
 
 	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
 	if (link_up) {
-		ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
-		               SPEED_10000 : SPEED_1000;
+		switch (link_speed) {
+		case IXGBE_LINK_SPEED_10GB_FULL:
+			ecmd->speed = SPEED_10000;
+			break;
+		case IXGBE_LINK_SPEED_1GB_FULL:
+			ecmd->speed = SPEED_1000;
+			break;
+		case IXGBE_LINK_SPEED_100_FULL:
+			ecmd->speed = SPEED_100;
+			break;
+		default:
+			break;
+		}
 		ecmd->duplex = DUPLEX_FULL;
 	} else {
 		ecmd->speed = -1;
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 6342d48..c54a882 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -159,13 +159,13 @@
 	struct scatterlist *sg;
 	unsigned int i, j, dmacount;
 	unsigned int len;
-	static const unsigned int bufflen = 4096;
+	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
 	unsigned int firstoff = 0;
 	unsigned int lastsize;
 	unsigned int thisoff = 0;
 	unsigned int thislen = 0;
 	u32 fcbuff, fcdmarw, fcfltrw;
-	dma_addr_t addr;
+	dma_addr_t addr = 0;
 
 	if (!netdev || !sgl)
 		return 0;
@@ -254,6 +254,24 @@
 	/* only the last buffer may have non-full bufflen */
 	lastsize = thisoff + thislen;
 
+	/*
+	 * lastsize can not be equal to bufflen.
+	 * If it is, then add another buffer with lastsize = 1.
+	 */
+	if (lastsize == bufflen) {
+		if (j >= IXGBE_BUFFCNT_MAX) {
+			e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
+				"not enough user buffers. We need an extra "
+				"buffer because lastsize is bufflen.\n",
+				xid, i, j, dmacount, (u64)addr);
+			goto out_noddp_free;
+		}
+
+		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
+		j++;
+		lastsize = 1;
+	}
+
 	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
 	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
 	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
@@ -532,6 +550,24 @@
 			e_err(drv, "failed to allocated FCoE DDP pool\n");
 
 		spin_lock_init(&fcoe->lock);
+
+		/* Extra buffer to be shared by all DDPs for HW workaround */
+		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+		if (fcoe->extra_ddp_buffer == NULL) {
+			e_err(drv, "failed to allocate extra DDP buffer\n");
+			goto out_extra_ddp_buffer_alloc;
+		}
+
+		fcoe->extra_ddp_buffer_dma =
+			dma_map_single(&adapter->pdev->dev,
+				       fcoe->extra_ddp_buffer,
+				       IXGBE_FCBUFF_MIN,
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(&adapter->pdev->dev,
+				      fcoe->extra_ddp_buffer_dma)) {
+			e_err(drv, "failed to map extra DDP buffer\n");
+			goto out_extra_ddp_buffer_dma;
+		}
 	}
 
 	/* Enable L2 eth type filter for FCoE */
@@ -581,6 +617,14 @@
 		}
 	}
 #endif
+
+	return;
+
+out_extra_ddp_buffer_dma:
+	kfree(fcoe->extra_ddp_buffer);
+out_extra_ddp_buffer_alloc:
+	pci_pool_destroy(fcoe->pool);
+	fcoe->pool = NULL;
 }
 
 /**
@@ -600,6 +644,11 @@
 	if (fcoe->pool) {
 		for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
 			ixgbe_fcoe_ddp_put(adapter->netdev, i);
+		dma_unmap_single(&adapter->pdev->dev,
+				 fcoe->extra_ddp_buffer_dma,
+				 IXGBE_FCBUFF_MIN,
+				 DMA_FROM_DEVICE);
+		kfree(fcoe->extra_ddp_buffer);
 		pci_pool_destroy(fcoe->pool);
 		fcoe->pool = NULL;
 	}
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 4bc2c55..65cc8fb 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -70,6 +70,8 @@
 	spinlock_t lock;
 	struct pci_pool *pool;
 	struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
+	unsigned char *extra_ddp_buffer;
+	dma_addr_t extra_ddp_buffer_dma;
 };
 
 #endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 602078b..eca762d 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -52,7 +52,7 @@
 static const char ixgbe_driver_string[] =
 			      "Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "3.0.12-k2"
+#define DRV_VERSION "3.2.9-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
 
@@ -648,7 +648,7 @@
  *
  * Returns : a tc index for use in range 0-7, or 0-3
  */
-u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
+static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
 {
 	int tc = -1;
 	int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
@@ -3176,9 +3176,16 @@
 	u32 mhadd, hlreg0;
 
 	/* Decide whether to use packet split mode or not */
+	/* On by default */
+	adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+
 	/* Do not use packet split if we're in SR-IOV Mode */
-	if (!adapter->num_vfs)
-		adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+	if (adapter->num_vfs)
+		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+
+	/* Disable packet split due to 82599 erratum #45 */
+	if (hw->mac.type == ixgbe_mac_82599EB)
+		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
 
 	/* Set the RX buffer length according to the mode */
 	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -3721,7 +3728,8 @@
 			 * We need to try and force an autonegotiation
 			 * session, then bring up link.
 			 */
-			hw->mac.ops.setup_sfp(hw);
+			if (hw->mac.ops.setup_sfp)
+				hw->mac.ops.setup_sfp(hw);
 			if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
 				schedule_work(&adapter->multispeed_fiber_task);
 		} else {
@@ -4863,16 +4871,13 @@
 {
 	int q_idx, num_q_vectors;
 	struct ixgbe_q_vector *q_vector;
-	int napi_vectors;
 	int (*poll)(struct napi_struct *, int);
 
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
 		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-		napi_vectors = adapter->num_rx_queues;
 		poll = &ixgbe_clean_rxtx_many;
 	} else {
 		num_q_vectors = 1;
-		napi_vectors = 1;
 		poll = &ixgbe_poll;
 	}
 
@@ -5169,7 +5174,6 @@
 	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
 	adapter->dcb_cfg.rx_pba_cfg = pba_equal;
 	adapter->dcb_cfg.pfc_mode_enable = false;
-	adapter->dcb_cfg.round_robin_enable = false;
 	adapter->dcb_set_bitmap = 0x00;
 	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
 			   adapter->ring_feature[RING_F_DCB].indices);
@@ -5606,6 +5610,10 @@
 	}
 
 	ixgbe_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_DCB
+	kfree(adapter->ixgbe_ieee_pfc);
+	kfree(adapter->ixgbe_ieee_ets);
+#endif
 
 #ifdef CONFIG_PM
 	retval = pci_save_state(pdev);
@@ -5964,7 +5972,8 @@
 		unregister_netdev(adapter->netdev);
 		return;
 	}
-	hw->mac.ops.setup_sfp(hw);
+	if (hw->mac.ops.setup_sfp)
+		hw->mac.ops.setup_sfp(hw);
 
 	if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
 		/* This will also work for DA Twinax connections */
@@ -6095,7 +6104,10 @@
 			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
 			       "10 Gbps" :
 			       (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
-			       "1 Gbps" : "unknown speed")),
+			       "1 Gbps" :
+			       (link_speed == IXGBE_LINK_SPEED_100_FULL ?
+			       "100 Mbps" :
+			       "unknown speed"))),
 			       ((flow_rx && flow_tx) ? "RX/TX" :
 			       (flow_rx ? "RX" :
 			       (flow_tx ? "TX" : "None"))));
@@ -7700,16 +7712,6 @@
 
 #endif /* CONFIG_IXGBE_DCA */
 
-/**
- * ixgbe_get_hw_dev return device
- * used by hardware layer to print debugging information
- **/
-struct net_device *ixgbe_get_hw_dev(struct ixgbe_hw *hw)
-{
-	struct ixgbe_adapter *adapter = hw->back;
-	return adapter->netdev;
-}
-
 module_exit(ixgbe_exit_module);
 
 /* ixgbe_main.c */
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index ea82c5a..f215c4c 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -437,6 +437,7 @@
 	return ret_val;
 }
 
+#ifdef CONFIG_PCI_IOV
 /**
  *  ixgbe_init_mbx_params_pf - set initial values for pf mailbox
  *  @hw: pointer to the HW structure
@@ -465,6 +466,7 @@
 		break;
 	}
 }
+#endif /* CONFIG_PCI_IOV */
 
 struct ixgbe_mbx_operations mbx_ops_generic = {
 	.read                   = ixgbe_read_mbx_pf,
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index 3df9b15..ada0ce3 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -86,7 +86,9 @@
 s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
 s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
 s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+#ifdef CONFIG_PCI_IOV
 void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+#endif /* CONFIG_PCI_IOV */
 
 extern struct ixgbe_mbx_operations mbx_ops_generic;
 
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 47b1573..187b3a1 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -110,12 +110,10 @@
 	return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
 }
 
-
 static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
 {
 	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
 	vmolr |= (IXGBE_VMOLR_ROMPE |
-		  IXGBE_VMOLR_ROPE |
 		  IXGBE_VMOLR_BAM);
 	if (aupe)
 		vmolr |= IXGBE_VMOLR_AUPE;
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index 3a89239..f2518b0 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -133,17 +133,17 @@
 	}
 
 	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
 	IXGBE_WRITE_FLUSH(hw);
 
 	/* Poll for reset bit to self-clear indicating reset is complete */
 	for (i = 0; i < 10; i++) {
 		udelay(1);
 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
-		if (!(ctrl & IXGBE_CTRL_RST))
+		if (!(ctrl & reset_bit))
 			break;
 	}
-	if (ctrl & IXGBE_CTRL_RST) {
+	if (ctrl & reset_bit) {
 		status = IXGBE_ERR_RESET_FAILED;
 		hw_dbg(hw, "Reset polling failed to complete.\n");
 	}
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index e97ebef..5b441b7 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -161,6 +161,67 @@
 }
 
 static inline void
+jme_mac_rxclk_off(struct jme_adapter *jme)
+{
+	jme->reg_gpreg1 |= GPREG1_RXCLKOFF;
+	jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_mac_rxclk_on(struct jme_adapter *jme)
+{
+	jme->reg_gpreg1 &= ~GPREG1_RXCLKOFF;
+	jwrite32f(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_mac_txclk_off(struct jme_adapter *jme)
+{
+	jme->reg_ghc &= ~(GHC_TO_CLK_SRC | GHC_TXMAC_CLK_SRC);
+	jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_mac_txclk_on(struct jme_adapter *jme)
+{
+	u32 speed = jme->reg_ghc & GHC_SPEED;
+	if (speed == GHC_SPEED_1000M)
+		jme->reg_ghc |= GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
+	else
+		jme->reg_ghc |= GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+	jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_reset_ghc_speed(struct jme_adapter *jme)
+{
+	jme->reg_ghc &= ~(GHC_SPEED | GHC_DPX);
+	jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_reset_250A2_workaround(struct jme_adapter *jme)
+{
+	jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
+			     GPREG1_RSSPATCH);
+	jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
+}
+
+static inline void
+jme_assert_ghc_reset(struct jme_adapter *jme)
+{
+	jme->reg_ghc |= GHC_SWRST;
+	jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
+jme_clear_ghc_reset(struct jme_adapter *jme)
+{
+	jme->reg_ghc &= ~GHC_SWRST;
+	jwrite32f(jme, JME_GHC, jme->reg_ghc);
+}
+
+static inline void
 jme_reset_mac_processor(struct jme_adapter *jme)
 {
 	static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
@@ -168,9 +229,24 @@
 	u32 gpreg0;
 	int i;
 
-	jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
-	udelay(2);
-	jwrite32(jme, JME_GHC, jme->reg_ghc);
+	jme_reset_ghc_speed(jme);
+	jme_reset_250A2_workaround(jme);
+
+	jme_mac_rxclk_on(jme);
+	jme_mac_txclk_on(jme);
+	udelay(1);
+	jme_assert_ghc_reset(jme);
+	udelay(1);
+	jme_mac_rxclk_off(jme);
+	jme_mac_txclk_off(jme);
+	udelay(1);
+	jme_clear_ghc_reset(jme);
+	udelay(1);
+	jme_mac_rxclk_on(jme);
+	jme_mac_txclk_on(jme);
+	udelay(1);
+	jme_mac_rxclk_off(jme);
+	jme_mac_txclk_off(jme);
 
 	jwrite32(jme, JME_RXDBA_LO, 0x00000000);
 	jwrite32(jme, JME_RXDBA_HI, 0x00000000);
@@ -190,14 +266,6 @@
 	else
 		gpreg0 = GPREG0_DEFAULT;
 	jwrite32(jme, JME_GPREG0, gpreg0);
-	jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT);
-}
-
-static inline void
-jme_reset_ghc_speed(struct jme_adapter *jme)
-{
-	jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
-	jwrite32(jme, JME_GHC, jme->reg_ghc);
 }
 
 static inline void
@@ -336,13 +404,13 @@
 }
 
 static inline void
-jme_set_phyfifoa(struct jme_adapter *jme)
+jme_set_phyfifo_5level(struct jme_adapter *jme)
 {
 	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0004);
 }
 
 static inline void
-jme_set_phyfifob(struct jme_adapter *jme)
+jme_set_phyfifo_8level(struct jme_adapter *jme)
 {
 	jme_mdio_write(jme->dev, jme->mii_if.phy_id, 27, 0x0000);
 }
@@ -351,7 +419,7 @@
 jme_check_link(struct net_device *netdev, int testonly)
 {
 	struct jme_adapter *jme = netdev_priv(netdev);
-	u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, bmcr, gpreg1;
+	u32 phylink, cnt = JME_SPDRSV_TIMEOUT, bmcr;
 	char linkmsg[64];
 	int rc = 0;
 
@@ -414,23 +482,21 @@
 
 		jme->phylink = phylink;
 
-		ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX |
-				GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE |
-				GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY);
+		/*
+		 * The speed/duplex settings of jme->reg_ghc have already been
+		 * cleared by jme_reset_mac_processor()
+		 */
 		switch (phylink & PHY_LINK_SPEED_MASK) {
 		case PHY_LINK_SPEED_10M:
-			ghc |= GHC_SPEED_10M |
-				GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+			jme->reg_ghc |= GHC_SPEED_10M;
 			strcat(linkmsg, "10 Mbps, ");
 			break;
 		case PHY_LINK_SPEED_100M:
-			ghc |= GHC_SPEED_100M |
-				GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
+			jme->reg_ghc |= GHC_SPEED_100M;
 			strcat(linkmsg, "100 Mbps, ");
 			break;
 		case PHY_LINK_SPEED_1000M:
-			ghc |= GHC_SPEED_1000M |
-				GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
+			jme->reg_ghc |= GHC_SPEED_1000M;
 			strcat(linkmsg, "1000 Mbps, ");
 			break;
 		default:
@@ -439,42 +505,40 @@
 
 		if (phylink & PHY_LINK_DUPLEX) {
 			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
-			ghc |= GHC_DPX;
+			jwrite32(jme, JME_TXTRHD, TXTRHD_FULLDUPLEX);
+			jme->reg_ghc |= GHC_DPX;
 		} else {
 			jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
 						TXMCS_BACKOFF |
 						TXMCS_CARRIERSENSE |
 						TXMCS_COLLISION);
-			jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
-				((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
-				TXTRHD_TXREN |
-				((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
+			jwrite32(jme, JME_TXTRHD, TXTRHD_HALFDUPLEX);
 		}
 
-		gpreg1 = GPREG1_DEFAULT;
+		jwrite32(jme, JME_GHC, jme->reg_ghc);
+
 		if (is_buggy250(jme->pdev->device, jme->chiprev)) {
+			jme->reg_gpreg1 &= ~(GPREG1_HALFMODEPATCH |
+					     GPREG1_RSSPATCH);
 			if (!(phylink & PHY_LINK_DUPLEX))
-				gpreg1 |= GPREG1_HALFMODEPATCH;
+				jme->reg_gpreg1 |= GPREG1_HALFMODEPATCH;
 			switch (phylink & PHY_LINK_SPEED_MASK) {
 			case PHY_LINK_SPEED_10M:
-				jme_set_phyfifoa(jme);
-				gpreg1 |= GPREG1_RSSPATCH;
+				jme_set_phyfifo_8level(jme);
+				jme->reg_gpreg1 |= GPREG1_RSSPATCH;
 				break;
 			case PHY_LINK_SPEED_100M:
-				jme_set_phyfifob(jme);
-				gpreg1 |= GPREG1_RSSPATCH;
+				jme_set_phyfifo_5level(jme);
+				jme->reg_gpreg1 |= GPREG1_RSSPATCH;
 				break;
 			case PHY_LINK_SPEED_1000M:
-				jme_set_phyfifoa(jme);
+				jme_set_phyfifo_8level(jme);
 				break;
 			default:
 				break;
 			}
 		}
-
-		jwrite32(jme, JME_GPREG1, gpreg1);
-		jwrite32(jme, JME_GHC, ghc);
-		jme->reg_ghc = ghc;
+		jwrite32(jme, JME_GPREG1, jme->reg_gpreg1);
 
 		strcat(linkmsg, (phylink & PHY_LINK_DUPLEX) ?
 					"Full-Duplex, " :
@@ -613,10 +677,14 @@
 	 * Enable TX Engine
 	 */
 	wmb();
-	jwrite32(jme, JME_TXCS, jme->reg_txcs |
+	jwrite32f(jme, JME_TXCS, jme->reg_txcs |
 				TXCS_SELECT_QUEUE0 |
 				TXCS_ENABLE);
 
+	/*
+	 * Start clock for TX MAC Processor
+	 */
+	jme_mac_txclk_on(jme);
 }
 
 static inline void
@@ -651,6 +719,11 @@
 
 	if (!i)
 		pr_err("Disable TX engine timeout\n");
+
+	/*
+	 * Stop clock for TX MAC Processor
+	 */
+	jme_mac_txclk_off(jme);
 }
 
 static void
@@ -825,16 +898,22 @@
 	/*
 	 * Setup Unicast Filter
 	 */
+	jme_set_unicastaddr(jme->dev);
 	jme_set_multi(jme->dev);
 
 	/*
 	 * Enable RX Engine
 	 */
 	wmb();
-	jwrite32(jme, JME_RXCS, jme->reg_rxcs |
+	jwrite32f(jme, JME_RXCS, jme->reg_rxcs |
 				RXCS_QUEUESEL_Q0 |
 				RXCS_ENABLE |
 				RXCS_QST);
+
+	/*
+	 * Start clock for RX MAC Processor
+	 */
+	jme_mac_rxclk_on(jme);
 }
 
 static inline void
@@ -871,10 +950,40 @@
 	if (!i)
 		pr_err("Disable RX engine timeout\n");
 
+	/*
+	 * Stop clock for RX MAC Processor
+	 */
+	jme_mac_rxclk_off(jme);
+}
+
+static u16
+jme_udpsum(struct sk_buff *skb)
+{
+	u16 csum = 0xFFFFu;
+
+	if (skb->len < (ETH_HLEN + sizeof(struct iphdr)))
+		return csum;
+	if (skb->protocol != htons(ETH_P_IP))
+		return csum;
+	skb_set_network_header(skb, ETH_HLEN);
+	if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
+	    (skb->len < (ETH_HLEN +
+			(ip_hdr(skb)->ihl << 2) +
+			sizeof(struct udphdr)))) {
+		skb_reset_network_header(skb);
+		return csum;
+	}
+	skb_set_transport_header(skb,
+			ETH_HLEN + (ip_hdr(skb)->ihl << 2));
+	csum = udp_hdr(skb)->check;
+	skb_reset_transport_header(skb);
+	skb_reset_network_header(skb);
+
+	return csum;
 }
 
 static int
-jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
+jme_rxsum_ok(struct jme_adapter *jme, u16 flags, struct sk_buff *skb)
 {
 	if (!(flags & (RXWBFLAG_TCPON | RXWBFLAG_UDPON | RXWBFLAG_IPV4)))
 		return false;
@@ -887,7 +996,7 @@
 	}
 
 	if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
-			== RXWBFLAG_UDPON)) {
+			== RXWBFLAG_UDPON) && jme_udpsum(skb)) {
 		if (flags & RXWBFLAG_IPV4)
 			netif_err(jme, rx_err, jme->dev, "UDP Checksum error\n");
 		return false;
@@ -935,7 +1044,7 @@
 		skb_put(skb, framesize);
 		skb->protocol = eth_type_trans(skb, jme->dev);
 
-		if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
+		if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags), skb))
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		else
 			skb_checksum_none_assert(skb);
@@ -1207,7 +1316,6 @@
 	tasklet_disable(&jme->rxempty_task);
 
 	if (netif_carrier_ok(netdev)) {
-		jme_reset_ghc_speed(jme);
 		jme_disable_rx_engine(jme);
 		jme_disable_tx_engine(jme);
 		jme_reset_mac_processor(jme);
@@ -1577,6 +1685,38 @@
 }
 
 static inline void
+jme_new_phy_on(struct jme_adapter *jme)
+{
+	u32 reg;
+
+	reg = jread32(jme, JME_PHY_PWR);
+	reg &= ~(PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
+		 PHY_PWR_DWN2 | PHY_PWR_CLKSEL);
+	jwrite32(jme, JME_PHY_PWR, reg);
+
+	pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
+	reg &= ~PE1_GPREG0_PBG;
+	reg |= PE1_GPREG0_ENBG;
+	pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
+}
+
+static inline void
+jme_new_phy_off(struct jme_adapter *jme)
+{
+	u32 reg;
+
+	reg = jread32(jme, JME_PHY_PWR);
+	reg |= PHY_PWR_DWN1SEL | PHY_PWR_DWN1SW |
+	       PHY_PWR_DWN2 | PHY_PWR_CLKSEL;
+	jwrite32(jme, JME_PHY_PWR, reg);
+
+	pci_read_config_dword(jme->pdev, PCI_PRIV_PE1, &reg);
+	reg &= ~PE1_GPREG0_PBG;
+	reg |= PE1_GPREG0_PDD3COLD;
+	pci_write_config_dword(jme->pdev, PCI_PRIV_PE1, reg);
+}
+
+static inline void
 jme_phy_on(struct jme_adapter *jme)
 {
 	u32 bmcr;
@@ -1584,6 +1724,22 @@
 	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
 	bmcr &= ~BMCR_PDOWN;
 	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+
+	if (new_phy_power_ctrl(jme->chip_main_rev))
+		jme_new_phy_on(jme);
+}
+
+static inline void
+jme_phy_off(struct jme_adapter *jme)
+{
+	u32 bmcr;
+
+	bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
+	bmcr |= BMCR_PDOWN;
+	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
+
+	if (new_phy_power_ctrl(jme->chip_main_rev))
+		jme_new_phy_off(jme);
 }
 
 static int
@@ -1606,12 +1762,11 @@
 
 	jme_start_irq(jme);
 
-	if (test_bit(JME_FLAG_SSET, &jme->flags)) {
-		jme_phy_on(jme);
+	jme_phy_on(jme);
+	if (test_bit(JME_FLAG_SSET, &jme->flags))
 		jme_set_settings(netdev, &jme->old_ecmd);
-	} else {
+	else
 		jme_reset_phy_processor(jme);
-	}
 
 	jme_reset_link(jme);
 
@@ -1657,12 +1812,6 @@
 	}
 }
 
-static inline void
-jme_phy_off(struct jme_adapter *jme)
-{
-	jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
-}
-
 static void
 jme_powersave_phy(struct jme_adapter *jme)
 {
@@ -1696,7 +1845,6 @@
 	tasklet_disable(&jme->rxclean_task);
 	tasklet_disable(&jme->rxempty_task);
 
-	jme_reset_ghc_speed(jme);
 	jme_disable_rx_engine(jme);
 	jme_disable_tx_engine(jme);
 	jme_reset_mac_processor(jme);
@@ -1993,27 +2141,34 @@
 	return NETDEV_TX_OK;
 }
 
+static void
+jme_set_unicastaddr(struct net_device *netdev)
+{
+	struct jme_adapter *jme = netdev_priv(netdev);
+	u32 val;
+
+	val = (netdev->dev_addr[3] & 0xff) << 24 |
+	      (netdev->dev_addr[2] & 0xff) << 16 |
+	      (netdev->dev_addr[1] & 0xff) <<  8 |
+	      (netdev->dev_addr[0] & 0xff);
+	jwrite32(jme, JME_RXUMA_LO, val);
+	val = (netdev->dev_addr[5] & 0xff) << 8 |
+	      (netdev->dev_addr[4] & 0xff);
+	jwrite32(jme, JME_RXUMA_HI, val);
+}
+
 static int
 jme_set_macaddr(struct net_device *netdev, void *p)
 {
 	struct jme_adapter *jme = netdev_priv(netdev);
 	struct sockaddr *addr = p;
-	u32 val;
 
 	if (netif_running(netdev))
 		return -EBUSY;
 
 	spin_lock_bh(&jme->macaddr_lock);
 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
-	val = (addr->sa_data[3] & 0xff) << 24 |
-	      (addr->sa_data[2] & 0xff) << 16 |
-	      (addr->sa_data[1] & 0xff) <<  8 |
-	      (addr->sa_data[0] & 0xff);
-	jwrite32(jme, JME_RXUMA_LO, val);
-	val = (addr->sa_data[5] & 0xff) << 8 |
-	      (addr->sa_data[4] & 0xff);
-	jwrite32(jme, JME_RXUMA_HI, val);
+	jme_set_unicastaddr(netdev);
 	spin_unlock_bh(&jme->macaddr_lock);
 
 	return 0;
@@ -2731,6 +2886,8 @@
 
 	jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
 	jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
+	jme->chip_main_rev = jme->chiprev & 0xF;
+	jme->chip_sub_rev = (jme->chiprev >> 4) & 0xF;
 }
 
 static const struct net_device_ops jme_netdev_ops = {
@@ -2880,6 +3037,7 @@
 	jme->reg_rxmcs = RXMCS_DEFAULT;
 	jme->reg_txpfc = 0;
 	jme->reg_pmcs = PMCS_MFEN;
+	jme->reg_gpreg1 = GPREG1_DEFAULT;
 	set_bit(JME_FLAG_TXCSUM, &jme->flags);
 	set_bit(JME_FLAG_TSO, &jme->flags);
 
@@ -2936,8 +3094,8 @@
 	jme->mii_if.mdio_write = jme_mdio_write;
 
 	jme_clear_pm(jme);
-	jme_set_phyfifoa(jme);
-	pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->rev);
+	jme_set_phyfifo_5level(jme);
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &jme->pcirev);
 	if (!jme->fpgaver)
 		jme_phy_init(jme);
 	jme_phy_off(jme);
@@ -2964,14 +3122,14 @@
 		goto err_out_unmap;
 	}
 
-	netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n",
+	netif_info(jme, probe, jme->dev, "%s%s chiprev:%x pcirev:%x macaddr:%pM\n",
 		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
 		   "JMC250 Gigabit Ethernet" :
 		   (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
 		   "JMC260 Fast Ethernet" : "Unknown",
 		   (jme->fpgaver != 0) ? " (FPGA)" : "",
 		   (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
-		   jme->rev, netdev->dev_addr);
+		   jme->pcirev, netdev->dev_addr);
 
 	return 0;
 
@@ -3035,7 +3193,6 @@
 			jme_polling_mode(jme);
 
 		jme_stop_pcc_timer(jme);
-		jme_reset_ghc_speed(jme);
 		jme_disable_rx_engine(jme);
 		jme_disable_tx_engine(jme);
 		jme_reset_mac_processor(jme);
@@ -3066,12 +3223,11 @@
 	jme_clear_pm(jme);
 	pci_restore_state(pdev);
 
-	if (test_bit(JME_FLAG_SSET, &jme->flags)) {
-		jme_phy_on(jme);
+	jme_phy_on(jme);
+	if (test_bit(JME_FLAG_SSET, &jme->flags))
 		jme_set_settings(netdev, &jme->old_ecmd);
-	} else {
+	else
 		jme_reset_phy_processor(jme);
-	}
 
 	jme_start_irq(jme);
 	netif_device_attach(netdev);
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index eac0926..8bf3045 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -26,7 +26,7 @@
 #define __JME_H_INCLUDED__
 
 #define DRV_NAME	"jme"
-#define DRV_VERSION	"1.0.7"
+#define DRV_VERSION	"1.0.8"
 #define PFX		DRV_NAME ": "
 
 #define PCI_DEVICE_ID_JMICRON_JMC250	0x0250
@@ -103,6 +103,37 @@
 #define HALF_US 500	/* 500 ns */
 #define JMESPIIOCTL	SIOCDEVPRIVATE
 
+#define PCI_PRIV_PE1		0xE4
+
+enum pci_priv_pe1_bit_masks {
+	PE1_ASPMSUPRT	= 0x00000003, /*
+				       * RW:
+				       * Aspm_support[1:0]
+				       * (R/W Port of 5C[11:10])
+				       */
+	PE1_MULTIFUN	= 0x00000004, /* RW: Multi_fun_bit */
+	PE1_RDYDMA	= 0x00000008, /* RO: ~link.rdy_for_dma */
+	PE1_ASPMOPTL	= 0x00000030, /* RW: link.rx10s_option[1:0] */
+	PE1_ASPMOPTH	= 0x000000C0, /* RW: 10_req=[3]?HW:[2] */
+	PE1_GPREG0	= 0x0000FF00, /*
+				       * SRW:
+				       * Cfg_gp_reg0
+				       * [7:6] phy_giga BG control
+				       * [5] CREQ_N as CREQ_N1 (CPPE# as CREQ#)
+				       * [4:0] Reserved
+				       */
+	PE1_GPREG0_PBG	= 0x0000C000, /* phy_giga BG control */
+	PE1_GPREG1	= 0x00FF0000, /* RW: Cfg_gp_reg1 */
+	PE1_REVID	= 0xFF000000, /* RO: Rev ID */
+};
+
+enum pci_priv_pe1_values {
+	PE1_GPREG0_ENBG		= 0x00000000, /* en BG */
+	PE1_GPREG0_PDD3COLD	= 0x00004000, /* giga_PD + d3cold */
+	PE1_GPREG0_PDPCIESD	= 0x00008000, /* giga_PD + pcie_shutdown */
+	PE1_GPREG0_PDPCIEIDDQ	= 0x0000C000, /* giga_PD + pcie_iddq */
+};
+
 /*
  * Dynamic(adaptive)/Static PCC values
  */
@@ -403,6 +434,7 @@
 	u32			reg_rxmcs;
 	u32			reg_ghc;
 	u32			reg_pmcs;
+	u32			reg_gpreg1;
 	u32			phylink;
 	u32			tx_ring_size;
 	u32			tx_ring_mask;
@@ -411,8 +443,10 @@
 	u32			rx_ring_mask;
 	u8			mrrs;
 	unsigned int		fpgaver;
-	unsigned int		chiprev;
-	u8			rev;
+	u8			chiprev;
+	u8			chip_main_rev;
+	u8			chip_sub_rev;
+	u8			pcirev;
 	u32			msg_enable;
 	struct ethtool_cmd	old_ecmd;
 	unsigned int		old_mtu;
@@ -497,6 +531,7 @@
 	JME_PMCS	= JME_MAC | 0x60, /* Power Management Control/Stat */
 
 
+	JME_PHY_PWR	= JME_PHY | 0x24, /* New PHY Power Ctrl Register */
 	JME_PHY_CS	= JME_PHY | 0x28, /* PHY Ctrl and Status Register */
 	JME_PHY_LINK	= JME_PHY | 0x30, /* PHY Link Status Register */
 	JME_SMBCSR	= JME_PHY | 0x40, /* SMB Control and Status */
@@ -624,6 +659,14 @@
 	TXTRHD_TXRL_SHIFT	= 0,
 };
 
+enum jme_txtrhd_values {
+	TXTRHD_FULLDUPLEX	= 0x00000000,
+	TXTRHD_HALFDUPLEX	= TXTRHD_TXPEN |
+				  ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
+				  TXTRHD_TXREN |
+				  ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL),
+};
+
 /*
  * RX Control/Status Bits
  */
@@ -779,6 +822,8 @@
  */
 enum jme_ghc_bit_mask {
 	GHC_SWRST		= 0x40000000,
+	GHC_TO_CLK_SRC		= 0x00C00000,
+	GHC_TXMAC_CLK_SRC	= 0x00300000,
 	GHC_DPX			= 0x00000040,
 	GHC_SPEED		= 0x00000030,
 	GHC_LINK_POLL		= 0x00000001,
@@ -833,6 +878,21 @@
 };
 
 /*
+ * New PHY Power Control Register
+ */
+enum jme_phy_pwr_bit_masks {
+	PHY_PWR_DWN1SEL	= 0x01000000, /* Phy_giga.p_PWR_DOWN1_SEL */
+	PHY_PWR_DWN1SW	= 0x02000000, /* Phy_giga.p_PWR_DOWN1_SW */
+	PHY_PWR_DWN2	= 0x04000000, /* Phy_giga.p_PWR_DOWN2 */
+	PHY_PWR_CLKSEL	= 0x08000000, /*
+				       * XTL_OUT Clock select
+				       * (an internal free-running clock)
+				       * 0: xtl_out = phy_giga.A_XTL25_O
+				       * 1: xtl_out = phy_giga.PD_OSC
+				       */
+};
+
+/*
  * Giga PHY Status Registers
  */
 enum jme_phy_link_bit_mask {
@@ -942,18 +1002,17 @@
 
 /*
  * General Purpose REG-1
- * Note: All theses bits defined here are for
- *       Chip mode revision 0x11 only
  */
-enum jme_gpreg1_masks {
+enum jme_gpreg1_bit_masks {
+	GPREG1_RXCLKOFF		= 0x04000000,
+	GPREG1_PCREQN		= 0x00020000,
+	GPREG1_HALFMODEPATCH	= 0x00000040, /* For Chip revision 0x11 only */
+	GPREG1_RSSPATCH		= 0x00000020, /* For Chip revision 0x11 only */
 	GPREG1_INTRDELAYUNIT	= 0x00000018,
 	GPREG1_INTRDELAYENABLE	= 0x00000007,
 };
 
 enum jme_gpreg1_vals {
-	GPREG1_RSSPATCH		= 0x00000040,
-	GPREG1_HALFMODEPATCH	= 0x00000020,
-
 	GPREG1_INTDLYUNIT_16NS	= 0x00000000,
 	GPREG1_INTDLYUNIT_256NS	= 0x00000008,
 	GPREG1_INTDLYUNIT_1US	= 0x00000010,
@@ -967,7 +1026,7 @@
 	GPREG1_INTDLYEN_6U	= 0x00000006,
 	GPREG1_INTDLYEN_7U	= 0x00000007,
 
-	GPREG1_DEFAULT		= 0x00000000,
+	GPREG1_DEFAULT		= GPREG1_PCREQN,
 };
 
 /*
@@ -1184,16 +1243,22 @@
 /*
  * Workaround
  */
-static inline int is_buggy250(unsigned short device, unsigned int chiprev)
+static inline int is_buggy250(unsigned short device, u8 chiprev)
 {
 	return device == PCI_DEVICE_ID_JMICRON_JMC250 && chiprev == 0x11;
 }
 
+static inline int new_phy_power_ctrl(u8 chip_main_rev)
+{
+	return chip_main_rev >= 5;
+}
+
 /*
  * Function prototypes
  */
 static int jme_set_settings(struct net_device *netdev,
 				struct ethtool_cmd *ecmd);
+static void jme_set_unicastaddr(struct net_device *netdev);
 static void jme_set_multi(struct net_device *netdev);
 
 #endif
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 2d9663a..ea0dc45 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -129,10 +129,6 @@
 
 static const struct ethtool_ops loopback_ethtool_ops = {
 	.get_link		= always_on,
-	.set_tso		= ethtool_op_set_tso,
-	.get_tx_csum		= always_on,
-	.get_sg			= always_on,
-	.get_rx_csum		= always_on,
 };
 
 static int loopback_dev_init(struct net_device *dev)
@@ -169,9 +165,12 @@
 	dev->type		= ARPHRD_LOOPBACK;	/* 0x0001*/
 	dev->flags		= IFF_LOOPBACK;
 	dev->priv_flags	       &= ~IFF_XMIT_DST_RELEASE;
+	dev->hw_features	= NETIF_F_ALL_TSO | NETIF_F_UFO;
 	dev->features 		= NETIF_F_SG | NETIF_F_FRAGLIST
-		| NETIF_F_TSO
+		| NETIF_F_ALL_TSO
+		| NETIF_F_UFO
 		| NETIF_F_NO_CSUM
+		| NETIF_F_RXCSUM
 		| NETIF_F_HIGHDMA
 		| NETIF_F_LLTX
 		| NETIF_F_NETNS_LOCAL;
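
The loopback conversion above is an early user of the hw_features mask: bits in hw_features are the offloads an administrator may flip with "ethtool -K", while dev->features is the set currently in effect (and may also carry fixed bits such as NETIF_F_HIGHDMA or NETIF_F_LLTX that are not meant to be toggled). A minimal, illustrative setup function showing that split; the name and exact feature mix are assumptions, not taken from the patch:

	#include <linux/netdevice.h>

	static void demo_dev_setup(struct net_device *dev)
	{
		/* offloads the administrator may toggle via ethtool -K */
		dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_UFO;

		/* offloads active right now, plus fixed, non-toggleable bits */
		dev->features = dev->hw_features
				| NETIF_F_HIGHDMA
				| NETIF_F_LLTX;
	}
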
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5933621..2300e45 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -39,7 +39,7 @@
 	struct socket sock;
 	struct socket_wq wq;
 	int vnet_hdr_sz;
-	struct macvlan_dev *vlan;
+	struct macvlan_dev __rcu *vlan;
 	struct file *file;
 	unsigned int flags;
 };
@@ -141,7 +141,8 @@
 	struct macvlan_dev *vlan;
 
 	spin_lock(&macvtap_lock);
-	vlan = rcu_dereference(q->vlan);
+	vlan = rcu_dereference_protected(q->vlan,
+					 lockdep_is_held(&macvtap_lock));
 	if (vlan) {
 		int index = get_slot(vlan, q);
 
@@ -219,7 +220,8 @@
 	/* macvtap_put_queue can free some slots, so go through all slots */
 	spin_lock(&macvtap_lock);
 	for (i = 0; i < MAX_MACVTAP_QUEUES && vlan->numvtaps; i++) {
-		q = rcu_dereference(vlan->taps[i]);
+		q = rcu_dereference_protected(vlan->taps[i],
+					      lockdep_is_held(&macvtap_lock));
 		if (q) {
 			qlist[j++] = q;
 			rcu_assign_pointer(vlan->taps[i], NULL);
@@ -569,7 +571,7 @@
 	}
 
 	rcu_read_lock_bh();
-	vlan = rcu_dereference(q->vlan);
+	vlan = rcu_dereference_bh(q->vlan);
 	if (vlan)
 		macvlan_start_xmit(skb, vlan->dev);
 	else
@@ -583,7 +585,7 @@
 
 err:
 	rcu_read_lock_bh();
-	vlan = rcu_dereference(q->vlan);
+	vlan = rcu_dereference_bh(q->vlan);
 	if (vlan)
 		vlan->dev->stats.tx_dropped++;
 	rcu_read_unlock_bh();
@@ -631,7 +633,7 @@
 	ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
 
 	rcu_read_lock_bh();
-	vlan = rcu_dereference(q->vlan);
+	vlan = rcu_dereference_bh(q->vlan);
 	if (vlan)
 		macvlan_count_rx(vlan, len, ret == 0, 0);
 	rcu_read_unlock_bh();
@@ -727,7 +729,7 @@
 
 	case TUNGETIFF:
 		rcu_read_lock_bh();
-		vlan = rcu_dereference(q->vlan);
+		vlan = rcu_dereference_bh(q->vlan);
 		if (vlan)
 			dev_hold(vlan->dev);
 		rcu_read_unlock_bh();
@@ -736,7 +738,7 @@
 			return -ENOLINK;
 
 		ret = 0;
-		if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) ||
+		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
 		    put_user(q->flags, &ifr->ifr_flags))
 			ret = -EFAULT;
 		dev_put(vlan->dev);
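
The macvtap hunks replace bare rcu_dereference() calls with the variant that matches each context: the q->vlan pointer gains an __rcu annotation, updates done under macvtap_lock use rcu_dereference_protected() with a lockdep expression, and the BH-protected fast paths use rcu_dereference_bh(). A small sketch of that pattern, with made-up names rather than the driver's:

	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	struct peer;

	static DEFINE_SPINLOCK(cfg_lock);
	static struct peer __rcu *cur_peer;	/* writes serialised by cfg_lock */

	/* Update path: holds cfg_lock, so _protected documents (and lockdep
	 * verifies) why a plain load of the pointer is safe here. */
	static struct peer *swap_peer(struct peer *new)
	{
		struct peer *old;

		spin_lock(&cfg_lock);
		old = rcu_dereference_protected(cur_peer,
						lockdep_is_held(&cfg_lock));
		rcu_assign_pointer(cur_peer, new);
		spin_unlock(&cfg_lock);
		return old;	/* caller frees it after a grace period */
	}

	/* Fast path: runs inside rcu_read_lock_bh(), so use the _bh flavour. */
	static void poke_peer(void)
	{
		struct peer *p;

		rcu_read_lock_bh();
		p = rcu_dereference_bh(cur_peer);
		if (p) {
			/* p is only guaranteed valid inside this section */
		}
		rcu_read_unlock_bh();
	}
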
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 4ffdc18..2765a3c 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1286,6 +1286,21 @@
 	{ PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
 	{ PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
 	{ PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
+	{ PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
+	{ PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
+	{ PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
+	{ PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
+	{ PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
 	{ 0, }
 };
 
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ea5cfe2..a7f2eed 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -253,7 +253,7 @@
 	unsigned long serial_number;
 	int vendor_specific_offset;
 	int fw_multicast_support;
-	unsigned long features;
+	u32 features;
 	u32 max_tso6;
 	u32 read_dma;
 	u32 write_dma;
@@ -1776,7 +1776,7 @@
 static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
 {
 	struct myri10ge_priv *mgp = netdev_priv(netdev);
-	unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
+	u32 flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
 
 	if (tso_enabled)
 		netdev->features |= flags;
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 2541321..9fb59d3 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -4489,6 +4489,9 @@
 {
 	struct niu_parent *parent = np->parent;
 	int first_rx_channel, first_tx_channel;
+	int num_rx_rings, num_tx_rings;
+	struct rx_ring_info *rx_rings;
+	struct tx_ring_info *tx_rings;
 	int i, port, err;
 
 	port = np->port;
@@ -4498,18 +4501,21 @@
 		first_tx_channel += parent->txchan_per_port[i];
 	}
 
-	np->num_rx_rings = parent->rxchan_per_port[port];
-	np->num_tx_rings = parent->txchan_per_port[port];
+	num_rx_rings = parent->rxchan_per_port[port];
+	num_tx_rings = parent->txchan_per_port[port];
 
-	netif_set_real_num_rx_queues(np->dev, np->num_rx_rings);
-	netif_set_real_num_tx_queues(np->dev, np->num_tx_rings);
-
-	np->rx_rings = kcalloc(np->num_rx_rings, sizeof(struct rx_ring_info),
-			       GFP_KERNEL);
+	rx_rings = kcalloc(num_rx_rings, sizeof(struct rx_ring_info),
+			   GFP_KERNEL);
 	err = -ENOMEM;
-	if (!np->rx_rings)
+	if (!rx_rings)
 		goto out_err;
 
+	np->num_rx_rings = num_rx_rings;
+	smp_wmb();
+	np->rx_rings = rx_rings;
+
+	netif_set_real_num_rx_queues(np->dev, num_rx_rings);
+
 	for (i = 0; i < np->num_rx_rings; i++) {
 		struct rx_ring_info *rp = &np->rx_rings[i];
 
@@ -4538,12 +4544,18 @@
 			return err;
 	}
 
-	np->tx_rings = kcalloc(np->num_tx_rings, sizeof(struct tx_ring_info),
-			       GFP_KERNEL);
+	tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info),
+			   GFP_KERNEL);
 	err = -ENOMEM;
-	if (!np->tx_rings)
+	if (!tx_rings)
 		goto out_err;
 
+	np->num_tx_rings = num_tx_rings;
+	smp_wmb();
+	np->tx_rings = tx_rings;
+
+	netif_set_real_num_tx_queues(np->dev, num_tx_rings);
+
 	for (i = 0; i < np->num_tx_rings; i++) {
 		struct tx_ring_info *rp = &np->tx_rings[i];
 
@@ -6246,11 +6258,17 @@
 static void niu_get_rx_stats(struct niu *np)
 {
 	unsigned long pkts, dropped, errors, bytes;
+	struct rx_ring_info *rx_rings;
 	int i;
 
 	pkts = dropped = errors = bytes = 0;
+
+	rx_rings = ACCESS_ONCE(np->rx_rings);
+	if (!rx_rings)
+		goto no_rings;
+
 	for (i = 0; i < np->num_rx_rings; i++) {
-		struct rx_ring_info *rp = &np->rx_rings[i];
+		struct rx_ring_info *rp = &rx_rings[i];
 
 		niu_sync_rx_discard_stats(np, rp, 0);
 
@@ -6259,6 +6277,8 @@
 		dropped += rp->rx_dropped;
 		errors += rp->rx_errors;
 	}
+
+no_rings:
 	np->dev->stats.rx_packets = pkts;
 	np->dev->stats.rx_bytes = bytes;
 	np->dev->stats.rx_dropped = dropped;
@@ -6268,16 +6288,24 @@
 static void niu_get_tx_stats(struct niu *np)
 {
 	unsigned long pkts, errors, bytes;
+	struct tx_ring_info *tx_rings;
 	int i;
 
 	pkts = errors = bytes = 0;
+
+	tx_rings = ACCESS_ONCE(np->tx_rings);
+	if (!tx_rings)
+		goto no_rings;
+
 	for (i = 0; i < np->num_tx_rings; i++) {
-		struct tx_ring_info *rp = &np->tx_rings[i];
+		struct tx_ring_info *rp = &tx_rings[i];
 
 		pkts += rp->tx_packets;
 		bytes += rp->tx_bytes;
 		errors += rp->tx_errors;
 	}
+
+no_rings:
 	np->dev->stats.tx_packets = pkts;
 	np->dev->stats.tx_bytes = bytes;
 	np->dev->stats.tx_errors = errors;
@@ -6287,9 +6315,10 @@
 {
 	struct niu *np = netdev_priv(dev);
 
-	niu_get_rx_stats(np);
-	niu_get_tx_stats(np);
-
+	if (netif_running(dev)) {
+		niu_get_rx_stats(np);
+		niu_get_tx_stats(np);
+	}
 	return &dev->stats;
 }
 
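
The niu change allocates a ring array, publishes the element count, and only then publishes the pointer, with an smp_wmb() in between, so that the lockless stats path can load the pointer with ACCESS_ONCE() and bail out quietly while the rings are still being set up. A stripped-down sketch of that publish/consume pattern (names are illustrative, not the driver's):

	#include <linux/compiler.h>
	#include <linux/slab.h>

	struct ring_stats {
		unsigned long packets;
	};

	struct dev_priv {
		int num_rings;
		struct ring_stats *rings;	/* NULL until fully initialised */
	};

	/* Writer: make the count visible before the pointer readers test. */
	static int publish_rings(struct dev_priv *priv, int n)
	{
		struct ring_stats *rings;

		rings = kcalloc(n, sizeof(*rings), GFP_KERNEL);
		if (!rings)
			return -ENOMEM;

		priv->num_rings = n;
		smp_wmb();			/* order count before pointer */
		priv->rings = rings;
		return 0;
	}

	/* Lockless reader: a NULL pointer means "not ready yet", skip it. */
	static unsigned long sum_packets(struct dev_priv *priv)
	{
		struct ring_stats *rings = ACCESS_ONCE(priv->rings);
		unsigned long sum = 0;
		int i;

		if (!rings)
			return 0;
		for (i = 0; i < priv->num_rings; i++)
			sum += rings[i].packets;
		return sum;
	}
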
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 84134c7..a41b2cf 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1988,12 +1988,11 @@
 	}
 
 	ndev = alloc_etherdev(sizeof(struct ns83820));
-	dev = PRIV(ndev);
-
 	err = -ENOMEM;
-	if (!dev)
+	if (!ndev)
 		goto out;
 
+	dev = PRIV(ndev);
 	dev->ndev = ndev;
 
 	spin_lock_init(&dev->rx_info.lock);
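
The ns83820 fix is purely about ordering: test the return value of alloc_etherdev() before touching the private area, since the priv pointer is carved out of (and only meaningful for) a successful net_device allocation. The corrected shape, as a rough standalone sketch:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	struct my_priv {
		struct net_device *ndev;
	};

	static struct net_device *my_alloc(void)
	{
		struct net_device *ndev;
		struct my_priv *priv;

		ndev = alloc_etherdev(sizeof(struct my_priv));
		if (!ndev)			/* check the allocation first ... */
			return NULL;

		priv = netdev_priv(ndev);	/* ... then it is safe to use priv */
		priv->ndev = ndev;
		return ndev;
	}
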
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index a0c26a9..e1e33c8 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -73,7 +73,7 @@
 	struct pch_gbe_regs_mac_adr mac_adr[16];
 	u32 ADDR_MASK;
 	u32 MIIM;
-	u32 reserve2;
+	u32 MAC_ADDR_LOAD;
 	u32 RGMII_ST;
 	u32 RGMII_CTRL;
 	u32 reserve3[3];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index d735530..b99e90a 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -29,6 +29,7 @@
 #define PCH_GBE_SHORT_PKT		64
 #define DSC_INIT16			0xC000
 #define PCH_GBE_DMA_ALIGN		0
+#define PCH_GBE_DMA_PADDING		2
 #define PCH_GBE_WATCHDOG_PERIOD		(1 * HZ)	/* watchdog time */
 #define PCH_GBE_COPYBREAK_DEFAULT	256
 #define PCH_GBE_PCI_BAR			1
@@ -88,6 +89,12 @@
 static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
 static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
 			       int data);
+
+inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
+{
+	iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
+}
+
 /**
  * pch_gbe_mac_read_mac_addr - Read MAC address
  * @hw:	            Pointer to the HW structure
@@ -519,7 +526,9 @@
 	struct pch_gbe_adapter *adapter;
 	adapter = container_of(work, struct pch_gbe_adapter, reset_task);
 
+	rtnl_lock();
 	pch_gbe_reinit_locked(adapter);
+	rtnl_unlock();
 }
 
 /**
@@ -528,14 +537,8 @@
  */
 void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
 {
-	struct net_device *netdev = adapter->netdev;
-
-	rtnl_lock();
-	if (netif_running(netdev)) {
-		pch_gbe_down(adapter);
-		pch_gbe_up(adapter);
-	}
-	rtnl_unlock();
+	pch_gbe_down(adapter);
+	pch_gbe_up(adapter);
 }
 
 /**
@@ -1369,16 +1372,13 @@
 	struct pch_gbe_buffer *buffer_info;
 	struct pch_gbe_rx_desc *rx_desc;
 	u32 length;
-	unsigned char tmp_packet[ETH_HLEN];
 	unsigned int i;
 	unsigned int cleaned_count = 0;
 	bool cleaned = false;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *new_skb;
 	u8 dma_status;
 	u16 gbec_status;
 	u32 tcp_ip_status;
-	u8 skb_copy_flag = 0;
-	u8 skb_padding_flag = 0;
 
 	i = rx_ring->next_to_clean;
 
@@ -1422,55 +1422,70 @@
 			pr_err("Receive CRC Error\n");
 		} else {
 			/* get receive length */
-			/* length convert[-3], padding[-2] */
-			length = (rx_desc->rx_words_eob) - 3 - 2;
+			/* length convert[-3] */
+			length = (rx_desc->rx_words_eob) - 3;
 
 			/* Decide the data conversion method */
 			if (!adapter->rx_csum) {
 				/* [Header:14][payload] */
-				skb_padding_flag = 0;
-				skb_copy_flag = 1;
-			} else {
-				/* [Header:14][padding:2][payload] */
-				skb_padding_flag = 1;
-				if (length < copybreak)
-					skb_copy_flag = 1;
-				else
-					skb_copy_flag = 0;
-			}
-
-			/* Data conversion */
-			if (skb_copy_flag) {	/* recycle  skb */
-				struct sk_buff *new_skb;
-				new_skb =
-				    netdev_alloc_skb(netdev,
-						     length + NET_IP_ALIGN);
-				if (new_skb) {
-					if (!skb_padding_flag) {
-						skb_reserve(new_skb,
-								NET_IP_ALIGN);
+				if (NET_IP_ALIGN) {
+					/* Because alignment differs,
+					 * the new_skb is newly allocated,
+					 * and data is copied to new_skb.*/
+					new_skb = netdev_alloc_skb(netdev,
+							 length + NET_IP_ALIGN);
+					if (!new_skb) {
+						/* dorrop error */
+						pr_err("New skb allocation "
+							"Error\n");
+						goto dorrop;
 					}
+					skb_reserve(new_skb, NET_IP_ALIGN);
 					memcpy(new_skb->data, skb->data,
-						length);
-					/* save the skb
-					 * in buffer_info as good */
+					       length);
 					skb = new_skb;
-				} else if (!skb_padding_flag) {
-					/* dorrop error */
-					pr_err("New skb allocation Error\n");
-					goto dorrop;
+				} else {
+					/* DMA buffer is used as SKB as it is.*/
+					buffer_info->skb = NULL;
 				}
 			} else {
-				buffer_info->skb = NULL;
+				/* [Header:14][padding:2][payload] */
+				/* The length includes padding length */
+				length = length - PCH_GBE_DMA_PADDING;
+				if ((length < copybreak) ||
+				    (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
+					/* Because alignment differs,
+					 * the new_skb is newly allocated,
+					 * and data is copied to new_skb.
+					 * Padding data is deleted
+					 * at the time of a copy.*/
+					new_skb = netdev_alloc_skb(netdev,
+							 length + NET_IP_ALIGN);
+					if (!new_skb) {
+						/* dorrop error */
+						pr_err("New skb allocation "
+							"Error\n");
+						goto dorrop;
+					}
+					skb_reserve(new_skb, NET_IP_ALIGN);
+					memcpy(new_skb->data, skb->data,
+					       ETH_HLEN);
+					memcpy(&new_skb->data[ETH_HLEN],
+					       &skb->data[ETH_HLEN +
+					       PCH_GBE_DMA_PADDING],
+					       length - ETH_HLEN);
+					skb = new_skb;
+				} else {
+					/* Padding data is deleted
+					 * by moving header data.*/
+					memmove(&skb->data[PCH_GBE_DMA_PADDING],
+						&skb->data[0], ETH_HLEN);
+					skb_reserve(skb, NET_IP_ALIGN);
+					buffer_info->skb = NULL;
+				}
 			}
-			if (skb_padding_flag) {
-				memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN);
-				memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
-					ETH_HLEN);
-				skb_reserve(skb, NET_IP_ALIGN);
-
-			}
-
+			/* The length includes FCS length */
+			length = length - ETH_FCS_LEN;
 			/* update status of driver */
 			adapter->stats.rx_bytes += length;
 			adapter->stats.rx_packets++;
@@ -2247,7 +2262,7 @@
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
-	flush_scheduled_work();
+	cancel_work_sync(&adapter->reset_task);
 	unregister_netdev(netdev);
 
 	pch_gbe_hal_phy_hw_reset(&adapter->hw);
@@ -2322,6 +2337,7 @@
 	netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
 	pch_gbe_set_ethtool_ops(netdev);
 
+	pch_gbe_mac_load_mac_addr(&adapter->hw);
 	pch_gbe_mac_reset_hw(&adapter->hw);
 
 	/* setup the private structure */
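
The reworked pch_gbe receive path deals with a DMA buffer laid out as [14-byte Ethernet header][2 bytes of hardware padding][payload] and has to hand the stack an skb whose IP header is NET_IP_ALIGN-aligned. Below the copybreak threshold it copies into a freshly allocated skb and drops the pad during the copy; otherwise it moves the header over the pad in place. A simplified sketch of the copy variant, with hypothetical names and a standalone length parameter:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	#define HW_RX_PADDING	2	/* pad inserted by hardware after the header */

	/*
	 * Copy a received frame out of the DMA skb, removing the 2-byte pad
	 * between the Ethernet header and the payload and re-aligning the
	 * result on NET_IP_ALIGN.  'len' is the frame length without the pad.
	 */
	static struct sk_buff *copy_rx_frame(struct net_device *netdev,
					     struct sk_buff *dma_skb,
					     unsigned int len)
	{
		struct sk_buff *skb;

		skb = netdev_alloc_skb(netdev, len + NET_IP_ALIGN);
		if (!skb)
			return NULL;

		skb_reserve(skb, NET_IP_ALIGN);
		memcpy(skb->data, dma_skb->data, ETH_HLEN);
		memcpy(skb->data + ETH_HLEN,
		       dma_skb->data + ETH_HLEN + HW_RX_PADDING,
		       len - ETH_HLEN);
		skb_put(skb, len);
		return skb;
	}
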
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 1f42f6a..d3cb772 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1488,12 +1488,10 @@
     
 	/* 
 	 * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total.
-	 * Early datasheets said to poll the reset bit, but now they say that
-	 * it "is not a reliable indicator and subsequently should be ignored."
-	 * We wait at least 10ms.
+	 * We wait at least 2ms.
 	 */
 
-	mdelay(10);
+	mdelay(2);
 
 	/*
 	 * Reset RBCR[01] back to zero as per magic incantation.
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 35fda5a..392a6c4 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -77,7 +77,6 @@
 	  Currently supports the DP83865 PHY.
 
 config STE10XP
-	depends on PHYLIB
 	tristate "Driver for STMicroelectronics STe10Xp PHYs"
 	---help---
 	  This is the driver for the STe100p and STe101p PHYs.
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0fd1678..590f902 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -19,13 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/phy.h>
-
-#define	PHY_ID_KSZ9021			0x00221611
-#define	PHY_ID_KS8737			0x00221720
-#define	PHY_ID_KS8041			0x00221510
-#define	PHY_ID_KS8051			0x00221550
-/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
-#define	PHY_ID_KS8001			0x0022161A
+#include <linux/micrel_phy.h>
 
 /* general Interrupt control/status reg in vendor specific block. */
 #define MII_KSZPHY_INTCS			0x1B
@@ -46,6 +40,7 @@
 #define KSZPHY_CTRL_INT_ACTIVE_HIGH		(1 << 9)
 #define KSZ9021_CTRL_INT_ACTIVE_HIGH		(1 << 14)
 #define KS8737_CTRL_INT_ACTIVE_HIGH		(1 << 14)
+#define KSZ8051_RMII_50MHZ_CLK			(1 << 7)
 
 static int kszphy_ack_interrupt(struct phy_device *phydev)
 {
@@ -106,6 +101,19 @@
 	return 0;
 }
 
+static int ks8051_config_init(struct phy_device *phydev)
+{
+	int regval;
+
+	if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
+		regval = phy_read(phydev, MII_KSZPHY_CTRL);
+		regval |= KSZ8051_RMII_50MHZ_CLK;
+		phy_write(phydev, MII_KSZPHY_CTRL, regval);
+	}
+
+	return 0;
+}
+
 static struct phy_driver ks8737_driver = {
 	.phy_id		= PHY_ID_KS8737,
 	.phy_id_mask	= 0x00fffff0,
@@ -142,7 +150,7 @@
 	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause
 				| SUPPORTED_Asym_Pause),
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
-	.config_init	= kszphy_config_init,
+	.config_init	= ks8051_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index c7a6c44..9f6d670 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -592,8 +592,8 @@
 			ppp_release(NULL, file);
 			err = 0;
 		} else
-			printk(KERN_DEBUG "PPPIOCDETACH file->f_count=%ld\n",
-			       atomic_long_read(&file->f_count));
+			pr_warn("PPPIOCDETACH file->f_count=%ld\n",
+				atomic_long_read(&file->f_count));
 		mutex_unlock(&ppp_mutex);
 		return err;
 	}
@@ -630,7 +630,7 @@
 
 	if (pf->kind != INTERFACE) {
 		/* can't happen */
-		printk(KERN_ERR "PPP: not interface or channel??\n");
+		pr_err("PPP: not interface or channel??\n");
 		return -EINVAL;
 	}
 
@@ -704,7 +704,8 @@
 		}
 		vj = slhc_init(val2+1, val+1);
 		if (!vj) {
-			printk(KERN_ERR "PPP: no memory (VJ compressor)\n");
+			netdev_err(ppp->dev,
+				   "PPP: no memory (VJ compressor)\n");
 			err = -ENOMEM;
 			break;
 		}
@@ -898,17 +899,17 @@
 {
 	int err;
 
-	printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
+	pr_info("PPP generic driver version " PPP_VERSION "\n");
 
 	err = register_pernet_device(&ppp_net_ops);
 	if (err) {
-		printk(KERN_ERR "failed to register PPP pernet device (%d)\n", err);
+		pr_err("failed to register PPP pernet device (%d)\n", err);
 		goto out;
 	}
 
 	err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
 	if (err) {
-		printk(KERN_ERR "failed to register PPP device (%d)\n", err);
+		pr_err("failed to register PPP device (%d)\n", err);
 		goto out_net;
 	}
 
@@ -1078,7 +1079,7 @@
 	new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
 	if (!new_skb) {
 		if (net_ratelimit())
-			printk(KERN_ERR "PPP: no memory (comp pkt)\n");
+			netdev_err(ppp->dev, "PPP: no memory (comp pkt)\n");
 		return NULL;
 	}
 	if (ppp->dev->hard_header_len > PPP_HDRLEN)
@@ -1108,7 +1109,7 @@
 		 * the same number.
 		 */
 		if (net_ratelimit())
-			printk(KERN_ERR "ppp: compressor dropped pkt\n");
+			netdev_err(ppp->dev, "ppp: compressor dropped pkt\n");
 		kfree_skb(skb);
 		kfree_skb(new_skb);
 		new_skb = NULL;
@@ -1138,7 +1139,9 @@
 		if (ppp->pass_filter &&
 		    sk_run_filter(skb, ppp->pass_filter) == 0) {
 			if (ppp->debug & 1)
-				printk(KERN_DEBUG "PPP: outbound frame not passed\n");
+				netdev_printk(KERN_DEBUG, ppp->dev,
+					      "PPP: outbound frame "
+					      "not passed\n");
 			kfree_skb(skb);
 			return;
 		}
@@ -1164,7 +1167,7 @@
 		new_skb = alloc_skb(skb->len + ppp->dev->hard_header_len - 2,
 				    GFP_ATOMIC);
 		if (!new_skb) {
-			printk(KERN_ERR "PPP: no memory (VJ comp pkt)\n");
+			netdev_err(ppp->dev, "PPP: no memory (VJ comp pkt)\n");
 			goto drop;
 		}
 		skb_reserve(new_skb, ppp->dev->hard_header_len - 2);
@@ -1202,7 +1205,9 @@
 	    proto != PPP_LCP && proto != PPP_CCP) {
 		if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
 			if (net_ratelimit())
-				printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n");
+				netdev_err(ppp->dev,
+					   "ppp: compression required but "
+					   "down - pkt dropped.\n");
 			goto drop;
 		}
 		skb = pad_compress_skb(ppp, skb);
@@ -1505,7 +1510,7 @@
  noskb:
 	spin_unlock_bh(&pch->downl);
 	if (ppp->debug & 1)
-		printk(KERN_ERR "PPP: no memory (fragment)\n");
+		netdev_err(ppp->dev, "PPP: no memory (fragment)\n");
 	++ppp->dev->stats.tx_errors;
 	++ppp->nxseq;
 	return 1;	/* abandon the frame */
@@ -1686,7 +1691,8 @@
 			/* copy to a new sk_buff with more tailroom */
 			ns = dev_alloc_skb(skb->len + 128);
 			if (!ns) {
-				printk(KERN_ERR"PPP: no memory (VJ decomp)\n");
+				netdev_err(ppp->dev, "PPP: no memory "
+					   "(VJ decomp)\n");
 				goto err;
 			}
 			skb_reserve(ns, 2);
@@ -1699,7 +1705,8 @@
 
 		len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
 		if (len <= 0) {
-			printk(KERN_DEBUG "PPP: VJ decompression error\n");
+			netdev_printk(KERN_DEBUG, ppp->dev,
+				      "PPP: VJ decompression error\n");
 			goto err;
 		}
 		len += 2;
@@ -1721,7 +1728,7 @@
 			goto err;
 
 		if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
-			printk(KERN_ERR "PPP: VJ uncompressed error\n");
+			netdev_err(ppp->dev, "PPP: VJ uncompressed error\n");
 			goto err;
 		}
 		proto = PPP_IP;
@@ -1762,8 +1769,9 @@
 			if (ppp->pass_filter &&
 			    sk_run_filter(skb, ppp->pass_filter) == 0) {
 				if (ppp->debug & 1)
-					printk(KERN_DEBUG "PPP: inbound frame "
-					       "not passed\n");
+					netdev_printk(KERN_DEBUG, ppp->dev,
+						      "PPP: inbound frame "
+						      "not passed\n");
 				kfree_skb(skb);
 				return;
 			}
@@ -1821,7 +1829,8 @@
 
 		ns = dev_alloc_skb(obuff_size);
 		if (!ns) {
-			printk(KERN_ERR "ppp_decompress_frame: no memory\n");
+			netdev_err(ppp->dev, "ppp_decompress_frame: "
+				   "no memory\n");
 			goto err;
 		}
 		/* the decompressor still expects the A/C bytes in the hdr */
@@ -1989,7 +1998,7 @@
 	u32 seq = ppp->nextseq;
 	u32 minseq = ppp->minseq;
 	struct sk_buff_head *list = &ppp->mrq;
-	struct sk_buff *p, *next;
+	struct sk_buff *p, *tmp;
 	struct sk_buff *head, *tail;
 	struct sk_buff *skb = NULL;
 	int lost = 0, len = 0;
@@ -1998,13 +2007,15 @@
 		return NULL;
 	head = list->next;
 	tail = NULL;
-	for (p = head; p != (struct sk_buff *) list; p = next) {
-		next = p->next;
+	skb_queue_walk_safe(list, p, tmp) {
+	again:
 		if (seq_before(PPP_MP_CB(p)->sequence, seq)) {
 			/* this can't happen, anyway ignore the skb */
-			printk(KERN_ERR "ppp_mp_reconstruct bad seq %u < %u\n",
-			       PPP_MP_CB(p)->sequence, seq);
-			head = next;
+			netdev_err(ppp->dev, "ppp_mp_reconstruct bad "
+				   "seq %u < %u\n",
+				   PPP_MP_CB(p)->sequence, seq);
+			__skb_unlink(p, list);
+			kfree_skb(p);
 			continue;
 		}
 		if (PPP_MP_CB(p)->sequence != seq) {
@@ -2016,8 +2027,7 @@
 			lost = 1;
 			seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
 				minseq + 1: PPP_MP_CB(p)->sequence;
-			next = p;
-			continue;
+			goto again;
 		}
 
 		/*
@@ -2042,17 +2052,9 @@
 		    (PPP_MP_CB(head)->BEbits & B)) {
 			if (len > ppp->mrru + 2) {
 				++ppp->dev->stats.rx_length_errors;
-				printk(KERN_DEBUG "PPP: reconstructed packet"
-				       " is too long (%d)\n", len);
-			} else if (p == head) {
-				/* fragment is complete packet - reuse skb */
-				tail = p;
-				skb = skb_get(p);
-				break;
-			} else if ((skb = dev_alloc_skb(len)) == NULL) {
-				++ppp->dev->stats.rx_missed_errors;
-				printk(KERN_DEBUG "PPP: no memory for "
-				       "reconstructed packet");
+				netdev_printk(KERN_DEBUG, ppp->dev,
+					      "PPP: reconstructed packet"
+					      " is too long (%d)\n", len);
 			} else {
 				tail = p;
 				break;
@@ -2065,9 +2067,17 @@
 		 * and we haven't found a complete valid packet yet,
 		 * we can discard up to and including this fragment.
 		 */
-		if (PPP_MP_CB(p)->BEbits & E)
-			head = next;
+		if (PPP_MP_CB(p)->BEbits & E) {
+			struct sk_buff *tmp2;
 
+			skb_queue_reverse_walk_from_safe(list, p, tmp2) {
+				__skb_unlink(p, list);
+				kfree_skb(p);
+			}
+			head = skb_peek(list);
+			if (!head)
+				break;
+		}
 		++seq;
 	}
 
@@ -2077,26 +2087,37 @@
 		   signal a receive error. */
 		if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
 			if (ppp->debug & 1)
-				printk(KERN_DEBUG "  missed pkts %u..%u\n",
-				       ppp->nextseq,
-				       PPP_MP_CB(head)->sequence-1);
+				netdev_printk(KERN_DEBUG, ppp->dev,
+					      "  missed pkts %u..%u\n",
+					      ppp->nextseq,
+					      PPP_MP_CB(head)->sequence-1);
 			++ppp->dev->stats.rx_dropped;
 			ppp_receive_error(ppp);
 		}
 
-		if (head != tail)
-			/* copy to a single skb */
-			for (p = head; p != tail->next; p = p->next)
-				skb_copy_bits(p, 0, skb_put(skb, p->len), p->len);
-		ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
-		head = tail->next;
-	}
+		skb = head;
+		if (head != tail) {
+			struct sk_buff **fragpp = &skb_shinfo(skb)->frag_list;
+			p = skb_queue_next(list, head);
+			__skb_unlink(skb, list);
+			skb_queue_walk_from_safe(list, p, tmp) {
+				__skb_unlink(p, list);
+				*fragpp = p;
+				p->next = NULL;
+				fragpp = &p->next;
 
-	/* Discard all the skbuffs that we have copied the data out of
-	   or that we can't use. */
-	while ((p = list->next) != head) {
-		__skb_unlink(p, list);
-		kfree_skb(p);
+				skb->len += p->len;
+				skb->data_len += p->len;
+				skb->truesize += p->len;
+
+				if (p == tail)
+					break;
+			}
+		} else {
+			__skb_unlink(skb, list);
+		}
+
+		ppp->nextseq = PPP_MP_CB(tail)->sequence + 1;
 	}
 
 	return skb;
@@ -2617,8 +2638,8 @@
 	ret = register_netdev(dev);
 	if (ret != 0) {
 		unit_put(&pn->units_idr, unit);
-		printk(KERN_ERR "PPP: couldn't register device %s (%d)\n",
-		       dev->name, ret);
+		netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
+			   dev->name, ret);
 		goto out2;
 	}
 
@@ -2690,9 +2711,9 @@
 
 	if (!ppp->file.dead || ppp->n_channels) {
 		/* "can't happen" */
-		printk(KERN_ERR "ppp: destroying ppp struct %p but dead=%d "
-		       "n_channels=%d !\n", ppp, ppp->file.dead,
-		       ppp->n_channels);
+		netdev_err(ppp->dev, "ppp: destroying ppp struct %p "
+			   "but dead=%d n_channels=%d !\n",
+			   ppp, ppp->file.dead, ppp->n_channels);
 		return;
 	}
 
@@ -2834,8 +2855,7 @@
 
 	if (!pch->file.dead) {
 		/* "can't happen" */
-		printk(KERN_ERR "ppp: destroying undead channel %p !\n",
-		       pch);
+		pr_err("ppp: destroying undead channel %p !\n", pch);
 		return;
 	}
 	skb_queue_purge(&pch->file.xq);
@@ -2847,7 +2867,7 @@
 {
 	/* should never happen */
 	if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
-		printk(KERN_ERR "PPP: removing module but units remain!\n");
+		pr_err("PPP: removing module but units remain!\n");
 	unregister_chrdev(PPP_MAJOR, "ppp");
 	device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
 	class_destroy(ppp_class);
@@ -2865,7 +2885,7 @@
 
 again:
 	if (!idr_pre_get(p, GFP_KERNEL)) {
-		printk(KERN_ERR "PPP: No free memory for idr\n");
+		pr_err("PPP: No free memory for idr\n");
 		return -ENOMEM;
 	}
 
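
The rewritten ppp_mp_reconstruct() stops copying fragments into one flat buffer and instead unlinks them from the receive queue and chains them onto the head skb's frag_list, bumping len, data_len and truesize as it goes. A self-contained sketch of that chaining step, mirroring the patch but with illustrative names; the caller is assumed to already serialise access to the queue:

	#include <linux/skbuff.h>

	/*
	 * Take every skb queued after 'head' up to and including 'tail' off
	 * 'list' and attach it to head's frag_list, so the packet is handed
	 * up as one skb without copying the payload.
	 */
	static void chain_fragments(struct sk_buff_head *list,
				    struct sk_buff *head, struct sk_buff *tail)
	{
		struct sk_buff **fragpp = &skb_shinfo(head)->frag_list;
		struct sk_buff *p, *tmp;

		p = skb_queue_next(list, head);	/* fetch before unlinking head */
		__skb_unlink(head, list);

		skb_queue_walk_from_safe(list, p, tmp) {
			__skb_unlink(p, list);
			*fragpp = p;
			p->next = NULL;
			fragpp = &p->next;

			head->len += p->len;
			head->data_len += p->len;
			head->truesize += p->len;

			if (p == tail)
				break;
		}
	}
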
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index bde7d61..469ab0b 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -973,7 +973,8 @@
 		if (pm)
 			pm_request_resume(&tp->pci_dev->dev);
 		netif_carrier_on(dev);
-		netif_info(tp, ifup, dev, "link up\n");
+		if (net_ratelimit())
+			netif_info(tp, ifup, dev, "link up\n");
 	} else {
 		netif_carrier_off(dev);
 		netif_info(tp, ifdown, dev, "link down\n");
@@ -3189,6 +3190,8 @@
 	if (pci_dev_run_wake(pdev))
 		pm_runtime_put_noidle(&pdev->dev);
 
+	netif_carrier_off(dev);
+
 out:
 	return rc;
 
@@ -3757,7 +3760,8 @@
 	RTL_W16(IntrMitigate, 0x5151);
 
 	/* Work around for RxFIFO overflow. */
-	if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
+	if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
+	    tp->mac_version == RTL_GIGA_MAC_VER_22) {
 		tp->intr_event |= RxFIFOOver | PCSTimeout;
 		tp->intr_event &= ~RxOverflow;
 	}
@@ -4639,12 +4643,33 @@
 			break;
 		}
 
-		/* Work around for rx fifo overflow */
-		if (unlikely(status & RxFIFOOver) &&
-		(tp->mac_version == RTL_GIGA_MAC_VER_11)) {
-			netif_stop_queue(dev);
-			rtl8169_tx_timeout(dev);
-			break;
+		if (unlikely(status & RxFIFOOver)) {
+			switch (tp->mac_version) {
+			/* Work around for rx fifo overflow */
+			case RTL_GIGA_MAC_VER_11:
+			case RTL_GIGA_MAC_VER_22:
+			case RTL_GIGA_MAC_VER_26:
+				netif_stop_queue(dev);
+				rtl8169_tx_timeout(dev);
+				goto done;
+			/* Testers needed. */
+			case RTL_GIGA_MAC_VER_17:
+			case RTL_GIGA_MAC_VER_19:
+			case RTL_GIGA_MAC_VER_20:
+			case RTL_GIGA_MAC_VER_21:
+			case RTL_GIGA_MAC_VER_23:
+			case RTL_GIGA_MAC_VER_24:
+			case RTL_GIGA_MAC_VER_27:
+			case RTL_GIGA_MAC_VER_28:
+			/* Experimental science. Pktgen proof. */
+			case RTL_GIGA_MAC_VER_12:
+			case RTL_GIGA_MAC_VER_25:
+				if (status == RxFIFOOver)
+					goto done;
+				break;
+			default:
+				break;
+			}
 		}
 
 		if (unlikely(status & SYSErr)) {
@@ -4680,7 +4705,7 @@
 			(status & RxFIFOOver) ? (status | RxOverflow) : status);
 		status = RTL_R16(IntrStatus);
 	}
-
+done:
 	return IRQ_RETVAL(handled);
 }
 
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 002bac7..35b7bc5 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -21,6 +21,7 @@
 #include <linux/ethtool.h>
 #include <linux/topology.h>
 #include <linux/gfp.h>
+#include <linux/cpu_rmap.h>
 #include "net_driver.h"
 #include "efx.h"
 #include "nic.h"
@@ -307,6 +308,8 @@
 			channel->irq_mod_score = 0;
 		}
 
+		efx_filter_rfs_expire(channel);
+
 		/* There is no race here; although napi_disable() will
 		 * only wait for napi_complete(), this isn't a problem
 		 * since efx_channel_processed() will have no effect if
@@ -673,7 +676,7 @@
 
 		efx_for_each_channel_rx_queue(rx_queue, channel)
 			efx_fini_rx_queue(rx_queue);
-		efx_for_each_channel_tx_queue(tx_queue, channel)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 			efx_fini_tx_queue(tx_queue);
 		efx_fini_eventq(channel);
 	}
@@ -689,7 +692,7 @@
 
 	efx_for_each_channel_rx_queue(rx_queue, channel)
 		efx_remove_rx_queue(rx_queue);
-	efx_for_each_channel_tx_queue(tx_queue, channel)
+	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
 		efx_remove_tx_queue(tx_queue);
 	efx_remove_eventq(channel);
 }
@@ -1175,10 +1178,32 @@
 	return count;
 }
 
+static int
+efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries)
+{
+#ifdef CONFIG_RFS_ACCEL
+	int i, rc;
+
+	efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels);
+	if (!efx->net_dev->rx_cpu_rmap)
+		return -ENOMEM;
+	for (i = 0; i < efx->n_rx_channels; i++) {
+		rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
+				      xentries[i].vector);
+		if (rc) {
+			free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+			efx->net_dev->rx_cpu_rmap = NULL;
+			return rc;
+		}
+	}
+#endif
+	return 0;
+}
+
 /* Probe the number and type of interrupts we are able to obtain, and
  * the resulting numbers of channels and RX queues.
  */
-static void efx_probe_interrupts(struct efx_nic *efx)
+static int efx_probe_interrupts(struct efx_nic *efx)
 {
 	int max_channels =
 		min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
@@ -1220,6 +1245,11 @@
 				efx->n_tx_channels = efx->n_channels;
 				efx->n_rx_channels = efx->n_channels;
 			}
+			rc = efx_init_rx_cpu_rmap(efx, xentries);
+			if (rc) {
+				pci_disable_msix(efx->pci_dev);
+				return rc;
+			}
 			for (i = 0; i < n_channels; i++)
 				efx_get_channel(efx, i)->irq =
 					xentries[i].vector;
@@ -1253,6 +1283,8 @@
 		efx->n_tx_channels = 1;
 		efx->legacy_irq = efx->pci_dev->irq;
 	}
+
+	return 0;
 }
 
 static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1271,21 +1303,8 @@
 
 static void efx_set_channels(struct efx_nic *efx)
 {
-	struct efx_channel *channel;
-	struct efx_tx_queue *tx_queue;
-
 	efx->tx_channel_offset =
 		separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
-
-	/* Channel pointers were set in efx_init_struct() but we now
-	 * need to clear them for TX queues in any RX-only channels. */
-	efx_for_each_channel(channel, efx) {
-		if (channel->channel - efx->tx_channel_offset >=
-		    efx->n_tx_channels) {
-			efx_for_each_channel_tx_queue(tx_queue, channel)
-				tx_queue->channel = NULL;
-		}
-	}
 }
 
 static int efx_probe_nic(struct efx_nic *efx)
@@ -1302,7 +1321,9 @@
 
 	/* Determine the number of channels and queues by trying to hook
 	 * in MSI-X interrupts. */
-	efx_probe_interrupts(efx);
+	rc = efx_probe_interrupts(efx);
+	if (rc)
+		goto fail;
 
 	if (efx->n_channels > 1)
 		get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1317,6 +1338,10 @@
 	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
 
 	return 0;
+
+fail:
+	efx->type->remove(efx);
+	return rc;
 }
 
 static void efx_remove_nic(struct efx_nic *efx)
@@ -1531,9 +1556,9 @@
 	efx->irq_rx_adaptive = rx_adaptive;
 	efx->irq_rx_moderation = rx_ticks;
 	efx_for_each_channel(channel, efx) {
-		if (efx_channel_get_rx_queue(channel))
+		if (efx_channel_has_rx_queue(channel))
 			channel->irq_moderation = rx_ticks;
-		else if (efx_channel_get_tx_queue(channel, 0))
+		else if (efx_channel_has_tx_queues(channel))
 			channel->irq_moderation = tx_ticks;
 	}
 }
@@ -1849,6 +1874,10 @@
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = efx_netpoll,
 #endif
+	.ndo_setup_tc		= efx_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer	= efx_filter_rfs,
+#endif
 };
 
 static void efx_update_name(struct efx_nic *efx)
@@ -1910,10 +1939,8 @@
 
 	efx_for_each_channel(channel, efx) {
 		struct efx_tx_queue *tx_queue;
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			tx_queue->core_txq = netdev_get_tx_queue(
-				efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
-		}
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			efx_init_tx_queue_core_txq(tx_queue);
 	}
 
 	/* Always start with carrier off; PHY events will detect the link */
@@ -2288,6 +2315,10 @@
  */
 static void efx_pci_remove_main(struct efx_nic *efx)
 {
+#ifdef CONFIG_RFS_ACCEL
+	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+	efx->net_dev->rx_cpu_rmap = NULL;
+#endif
 	efx_nic_fini_interrupt(efx);
 	efx_fini_channels(efx);
 	efx_fini_port(efx);
@@ -2401,7 +2432,8 @@
 	int i, rc;
 
 	/* Allocate and initialise a struct net_device and struct efx_nic */
-	net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
+	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+				     EFX_MAX_RX_QUEUES);
 	if (!net_dev)
 		return -ENOMEM;
 	net_dev->features |= (type->offload_features | NETIF_F_SG |
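
efx_init_rx_cpu_rmap() wires accelerated RFS up to the MSI-X vectors: the driver allocates a reverse CPU map sized for its RX channels, registers each RX interrupt in it, and hangs it off net_dev->rx_cpu_rmap so the stack can tell which queue is closest to the CPU owning a flow. A bare-bones version of that setup, under the same CONFIG_RFS_ACCEL guard, with an assumed array of RX IRQ numbers:

	#include <linux/cpu_rmap.h>
	#include <linux/netdevice.h>

	#ifdef CONFIG_RFS_ACCEL
	static int setup_rx_cpu_rmap(struct net_device *dev,
				     const int *rx_irqs, unsigned int n_rx)
	{
		unsigned int i;
		int rc;

		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(n_rx);
		if (!dev->rx_cpu_rmap)
			return -ENOMEM;

		for (i = 0; i < n_rx; i++) {
			rc = irq_cpu_rmap_add(dev->rx_cpu_rmap, rx_irqs[i]);
			if (rc) {
				free_irq_cpu_rmap(dev->rx_cpu_rmap);
				dev->rx_cpu_rmap = NULL;
				return rc;
			}
		}
		return 0;
	}
	#endif

Note that the map has to be freed, and the pointer cleared, before the interrupts themselves are torn down, which is why the free_irq_cpu_rmap() call above sits in efx_pci_remove_main() ahead of efx_nic_fini_interrupt().
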
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index d43a7e5..cbce62b 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -29,6 +29,7 @@
 extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
 extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
 extern netdev_tx_t
@@ -36,6 +37,7 @@
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -74,6 +76,21 @@
 				    struct efx_filter_spec *spec);
 extern void efx_filter_clear_rx(struct efx_nic *efx,
 				enum efx_filter_priority priority);
+#ifdef CONFIG_RFS_ACCEL
+extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+			  u16 rxq_index, u32 flow_id);
+extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
+static inline void efx_filter_rfs_expire(struct efx_channel *channel)
+{
+	if (channel->rfs_filters_added >= 60 &&
+	    __efx_filter_rfs_expire(channel->efx, 100))
+		channel->rfs_filters_added -= 60;
+}
+#define efx_filter_rfs_enabled() 1
+#else
+static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
+#define efx_filter_rfs_enabled() 0
+#endif
 
 /* Channels */
 extern void efx_process_channel_now(struct efx_channel *channel);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 0e8bb19..272cfe7 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -502,7 +502,7 @@
 static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
 {
 	struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
-	unsigned long features;
+	u32 features;
 
 	features = NETIF_F_TSO;
 	if (efx->type->offload_features & NETIF_F_V6_CSUM)
@@ -519,7 +519,7 @@
 static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
-	unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM;
+	u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM;
 
 	if (enable)
 		net_dev->features |= features;
@@ -631,7 +631,7 @@
 	/* Find lowest IRQ moderation across all used TX queues */
 	coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
 	efx_for_each_channel(channel, efx) {
-		if (!efx_channel_get_tx_queue(channel, 0))
+		if (!efx_channel_has_tx_queues(channel))
 			continue;
 		if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
 			if (channel->channel < efx->n_rx_channels)
@@ -676,8 +676,8 @@
 
 	/* If the channel is shared only allow RX parameters to be set */
 	efx_for_each_channel(channel, efx) {
-		if (efx_channel_get_rx_queue(channel) &&
-		    efx_channel_get_tx_queue(channel, 0) &&
+		if (efx_channel_has_rx_queue(channel) &&
+		    efx_channel_has_tx_queues(channel) &&
 		    tx_usecs) {
 			netif_err(efx, drv, efx->net_dev, "Channel is shared. "
 				  "Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
index d4722c4..95a980f 100644
--- a/drivers/net/sfc/filter.c
+++ b/drivers/net/sfc/filter.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/in.h>
+#include <net/ip.h>
 #include "efx.h"
 #include "filter.h"
 #include "io.h"
@@ -27,6 +28,10 @@
  */
 #define FILTER_CTL_SRCH_MAX 200
 
+/* Don't try very hard to find space for performance hints, as this is
+ * counter-productive. */
+#define FILTER_CTL_SRCH_HINT_MAX 5
+
 enum efx_filter_table_id {
 	EFX_FILTER_TABLE_RX_IP = 0,
 	EFX_FILTER_TABLE_RX_MAC,
@@ -47,6 +52,10 @@
 struct efx_filter_state {
 	spinlock_t	lock;
 	struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
+#ifdef CONFIG_RFS_ACCEL
+	u32		*rps_flow_id;
+	unsigned	rps_expire_index;
+#endif
 };
 
 /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
@@ -325,15 +334,16 @@
 			     struct efx_filter_spec *spec, u32 key,
 			     bool for_insert, int *depth_required)
 {
-	unsigned hash, incr, filter_idx, depth;
+	unsigned hash, incr, filter_idx, depth, depth_max;
 	struct efx_filter_spec *cmp;
 
 	hash = efx_filter_hash(key);
 	incr = efx_filter_increment(key);
+	depth_max = (spec->priority <= EFX_FILTER_PRI_HINT ?
+		     FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX);
 
 	for (depth = 1, filter_idx = hash & (table->size - 1);
-	     depth <= FILTER_CTL_SRCH_MAX &&
-		     test_bit(filter_idx, table->used_bitmap);
+	     depth <= depth_max && test_bit(filter_idx, table->used_bitmap);
 	     ++depth) {
 		cmp = &table->spec[filter_idx];
 		if (efx_filter_equal(spec, cmp))
@@ -342,7 +352,7 @@
 	}
 	if (!for_insert)
 		return -ENOENT;
-	if (depth > FILTER_CTL_SRCH_MAX)
+	if (depth > depth_max)
 		return -EBUSY;
 found:
 	*depth_required = depth;
@@ -562,6 +572,13 @@
 	spin_lock_init(&state->lock);
 
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+#ifdef CONFIG_RFS_ACCEL
+		state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
+					     sizeof(*state->rps_flow_id),
+					     GFP_KERNEL);
+		if (!state->rps_flow_id)
+			goto fail;
+#endif
 		table = &state->table[EFX_FILTER_TABLE_RX_IP];
 		table->id = EFX_FILTER_TABLE_RX_IP;
 		table->offset = FR_BZ_RX_FILTER_TBL0;
@@ -607,5 +624,97 @@
 		kfree(state->table[table_id].used_bitmap);
 		vfree(state->table[table_id].spec);
 	}
+#ifdef CONFIG_RFS_ACCEL
+	kfree(state->rps_flow_id);
+#endif
 	kfree(state);
 }
+
+#ifdef CONFIG_RFS_ACCEL
+
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+		   u16 rxq_index, u32 flow_id)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_channel *channel;
+	struct efx_filter_state *state = efx->filter_state;
+	struct efx_filter_spec spec;
+	const struct iphdr *ip;
+	const __be16 *ports;
+	int nhoff;
+	int rc;
+
+	nhoff = skb_network_offset(skb);
+
+	if (skb->protocol != htons(ETH_P_IP))
+		return -EPROTONOSUPPORT;
+
+	/* RFS must validate the IP header length before calling us */
+	EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + sizeof(*ip)));
+	ip = (const struct iphdr *)(skb->data + nhoff);
+	if (ip->frag_off & htons(IP_MF | IP_OFFSET))
+		return -EPROTONOSUPPORT;
+	EFX_BUG_ON_PARANOID(!pskb_may_pull(skb, nhoff + 4 * ip->ihl + 4));
+	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+
+	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
+	rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
+				      ip->daddr, ports[1], ip->saddr, ports[0]);
+	if (rc)
+		return rc;
+
+	rc = efx_filter_insert_filter(efx, &spec, true);
+	if (rc < 0)
+		return rc;
+
+	/* Remember this so we can check whether to expire the filter later */
+	state->rps_flow_id[rc] = flow_id;
+	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+	++channel->rfs_filters_added;
+
+	netif_info(efx, rx_status, efx->net_dev,
+		   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+		   (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
+		   &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
+		   rxq_index, flow_id, rc);
+
+	return rc;
+}
+
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
+{
+	struct efx_filter_state *state = efx->filter_state;
+	struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
+	unsigned mask = table->size - 1;
+	unsigned index;
+	unsigned stop;
+
+	if (!spin_trylock_bh(&state->lock))
+		return false;
+
+	index = state->rps_expire_index;
+	stop = (index + quota) & mask;
+
+	while (index != stop) {
+		if (test_bit(index, table->used_bitmap) &&
+		    table->spec[index].priority == EFX_FILTER_PRI_HINT &&
+		    rps_may_expire_flow(efx->net_dev,
+					table->spec[index].dmaq_id,
+					state->rps_flow_id[index], index)) {
+			netif_info(efx, rx_status, efx->net_dev,
+				   "expiring filter %d [flow %u]\n",
+				   index, state->rps_flow_id[index]);
+			efx_filter_table_clear_entry(efx, table, index);
+		}
+		index = (index + 1) & mask;
+	}
+
+	state->rps_expire_index = stop;
+	if (table->used == 0)
+		efx_filter_table_reset_search_depth(table);
+
+	spin_unlock_bh(&state->lock);
+	return true;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 28df866..15b9068 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -63,10 +63,12 @@
 /* Checksum generation is a per-queue option in hardware, so each
  * queue visible to the networking core is backed by two hardware TX
  * queues. */
-#define EFX_MAX_CORE_TX_QUEUES	EFX_MAX_CHANNELS
-#define EFX_TXQ_TYPE_OFFLOAD	1
-#define EFX_TXQ_TYPES		2
-#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
+#define EFX_MAX_TX_TC		2
+#define EFX_MAX_CORE_TX_QUEUES	(EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OFFLOAD	1	/* flag */
+#define EFX_TXQ_TYPE_HIGHPRI	2	/* flag */
+#define EFX_TXQ_TYPES		4
+#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
 
 /**
  * struct efx_special_buffer - An Efx special buffer
@@ -140,6 +142,7 @@
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
+ * @initialised: Has hardware queue been initialised?
  * @flushed: Used when handling queue flushing
  * @read_count: Current read pointer.
  *	This is the number of buffers that have been removed from both rings.
@@ -182,6 +185,7 @@
 	struct efx_tx_buffer *buffer;
 	struct efx_special_buffer txd;
 	unsigned int ptr_mask;
+	bool initialised;
 	enum efx_flush_state flushed;
 
 	/* Members used mainly on the completion path */
@@ -358,6 +362,9 @@
 
 	unsigned int irq_count;
 	unsigned int irq_mod_score;
+#ifdef CONFIG_RFS_ACCEL
+	unsigned int rfs_filters_added;
+#endif
 
 	int rx_alloc_level;
 	int rx_alloc_push_pages;
@@ -377,7 +384,7 @@
 	bool rx_pkt_csummed;
 
 	struct efx_rx_queue rx_queue;
-	struct efx_tx_queue tx_queue[2];
+	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
 };
 
 enum efx_led_mode {
@@ -906,7 +913,7 @@
 	unsigned int phys_addr_channels;
 	unsigned int tx_dc_base;
 	unsigned int rx_dc_base;
-	unsigned long offload_features;
+	u32 offload_features;
 	u32 reset_world_flags;
 };
 
@@ -938,18 +945,40 @@
 	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
 }
 
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+	return channel->channel - channel->efx->tx_channel_offset <
+		channel->efx->n_tx_channels;
+}
+
 static inline struct efx_tx_queue *
 efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
 {
-	struct efx_tx_queue *tx_queue = channel->tx_queue;
-	EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
-	return tx_queue->channel ? tx_queue + type : NULL;
+	EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
+			    type >= EFX_TXQ_TYPES);
+	return &channel->tx_queue[type];
+}
+
+static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+{
+	return !(tx_queue->efx->net_dev->num_tc < 2 &&
+		 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
 }
 
 /* Iterate over all TX queues belonging to a channel */
 #define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
-	for (_tx_queue = efx_channel_get_tx_queue(channel, 0);		\
-	     _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+	if (!efx_channel_has_tx_queues(_channel))			\
+		;							\
+	else								\
+		for (_tx_queue = (_channel)->tx_queue;			\
+		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
+			     efx_tx_queue_used(_tx_queue);		\
+		     _tx_queue++)
+
+/* Iterate over all possible TX queues belonging to a channel */
+#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
+	for (_tx_queue = (_channel)->tx_queue;				\
+	     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;		\
 	     _tx_queue++)
 
 static inline struct efx_rx_queue *
@@ -959,18 +988,26 @@
 	return &efx->channel[index]->rx_queue;
 }
 
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+	return channel->channel < channel->efx->n_rx_channels;
+}
+
 static inline struct efx_rx_queue *
 efx_channel_get_rx_queue(struct efx_channel *channel)
 {
-	return channel->channel < channel->efx->n_rx_channels ?
-		&channel->rx_queue : NULL;
+	EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
+	return &channel->rx_queue;
 }
 
 /* Iterate over all RX queues belonging to a channel */
 #define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
-	for (_rx_queue = efx_channel_get_rx_queue(channel);		\
-	     _rx_queue;							\
-	     _rx_queue = NULL)
+	if (!efx_channel_has_rx_queue(_channel))			\
+		;							\
+	else								\
+		for (_rx_queue = &(_channel)->rx_queue;			\
+		     _rx_queue;						\
+		     _rx_queue = NULL)
 
 static inline struct efx_channel *
 efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
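
The new efx_for_each_channel_tx_queue() uses the "if (!cond) ; else for (...)" construction so that a channel without TX queues iterates zero times while the macro still expands to a single statement, which keeps it well-behaved under an un-braced if/else at the call site. The shape of that trick in isolation, with a hypothetical container type:

	#include <linux/types.h>

	struct box {
		bool active;
		int *items;	/* points at 'count' entries when active */
		int count;
	};

	/*
	 * Iterate only when the container is active; because the expansion is
	 * "if ... ; else for ...", something like
	 *     if (x) for_each_item(it, b) f(it); else g();
	 * still parses the way the caller expects.
	 */
	#define for_each_item(_it, _box)				\
		if (!(_box)->active)					\
			;						\
		else							\
			for (_it = (_box)->items;			\
			     _it < (_box)->items + (_box)->count;	\
			     _it++)
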
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index da38659..1d0b8b6 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -445,8 +445,8 @@
 
 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 {
-	efx_oword_t tx_desc_ptr;
 	struct efx_nic *efx = tx_queue->efx;
+	efx_oword_t reg;
 
 	tx_queue->flushed = FLUSH_NONE;
 
@@ -454,7 +454,7 @@
 	efx_init_special_buffer(efx, &tx_queue->txd);
 
 	/* Push TX descriptor ring to card */
-	EFX_POPULATE_OWORD_10(tx_desc_ptr,
+	EFX_POPULATE_OWORD_10(reg,
 			      FRF_AZ_TX_DESCQ_EN, 1,
 			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
 			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -470,17 +470,15 @@
 
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
 		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
-		EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
 				    !csum);
 	}
 
-	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
 			 tx_queue->queue);
 
 	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
-		efx_oword_t reg;
-
 		/* Only 128 bits in this register */
 		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
 
@@ -491,6 +489,16 @@
 			set_bit_le(tx_queue->queue, (void *)&reg);
 		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
 	}
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_1(reg,
+				     FRF_BZ_TX_PACE,
+				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     FFE_BZ_TX_PACE_OFF :
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+				 tx_queue->queue);
+	}
 }
 
 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1238,8 +1246,10 @@
 
 	/* Flush all tx queues in parallel */
 	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_tx_queue(tx_queue, channel)
-			efx_flush_tx_queue(tx_queue);
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised)
+				efx_flush_tx_queue(tx_queue);
+		}
 	}
 
 	/* The hardware supports four concurrent rx flushes, each of which may
@@ -1262,8 +1272,9 @@
 					++rx_pending;
 				}
 			}
-			efx_for_each_channel_tx_queue(tx_queue, channel) {
-				if (tx_queue->flushed != FLUSH_DONE)
+			efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+				if (tx_queue->initialised &&
+				    tx_queue->flushed != FLUSH_DONE)
 					++tx_pending;
 			}
 		}
@@ -1278,8 +1289,9 @@
 	/* Mark the queues as all flushed. We're going to return failure
 	 * leading to a reset, or fake up success anyway */
 	efx_for_each_channel(channel, efx) {
-		efx_for_each_channel_tx_queue(tx_queue, channel) {
-			if (tx_queue->flushed != FLUSH_DONE)
+		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+			if (tx_queue->initialised &&
+			    tx_queue->flushed != FLUSH_DONE)
 				netif_err(efx, hw, efx->net_dev,
 					  "tx queue %d flush command timed out\n",
 					  tx_queue->queue);
@@ -1682,6 +1694,19 @@
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
 		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
 	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		EFX_POPULATE_OWORD_4(temp,
+				     /* Default values */
+				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+				     FRF_BZ_TX_PACE_SB_AF, 0xb,
+				     FRF_BZ_TX_PACE_FB_BASE, 0,
+				     /* Allow large pace values in the
+				      * fast bin. */
+				     FRF_BZ_TX_PACE_BIN_TH,
+				     FFE_BZ_TX_PACE_RESERVED);
+		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+	}
 }
 
 /* Register dump */
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
index 96430ed..8227de6 100644
--- a/drivers/net/sfc/regs.h
+++ b/drivers/net/sfc/regs.h
@@ -2907,6 +2907,12 @@
 #define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
 #define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
 
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define FFE_BZ_TX_PACE_OFF 0
+#define FFE_BZ_TX_PACE_RESERVED 21
+
 /* DRIVER_EV */
 /* Sub-fields of an RX flush completion event */
 #define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0ebfb99..f936892 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -644,7 +644,7 @@
 			goto out;
 		}
 
-		/* Test both types of TX queue */
+		/* Test all enabled types of TX queue */
 		efx_for_each_channel_tx_queue(tx_queue, channel) {
 			state->offload_csum = (tx_queue->queue &
 					       EFX_TXQ_TYPE_OFFLOAD);
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 2f5e9da..1a51653 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -336,17 +336,91 @@
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
 	struct efx_tx_queue *tx_queue;
+	unsigned index, type;
 
 	if (unlikely(efx->port_inhibited))
 		return NETDEV_TX_BUSY;
 
-	tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
-				    skb->ip_summed == CHECKSUM_PARTIAL ?
-				    EFX_TXQ_TYPE_OFFLOAD : 0);
+	index = skb_get_queue_mapping(skb);
+	type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+	if (index >= efx->n_tx_channels) {
+		index -= efx->n_tx_channels;
+		type |= EFX_TXQ_TYPE_HIGHPRI;
+	}
+	tx_queue = efx_get_tx_queue(efx, index, type);
 
 	return efx_enqueue_skb(tx_queue, skb);
 }
 
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+
+	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
+	tx_queue->core_txq =
+		netdev_get_tx_queue(efx->net_dev,
+				    tx_queue->queue / EFX_TXQ_TYPES +
+				    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+				     efx->n_tx_channels : 0));
+}
+
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+	unsigned tc;
+	int rc;
+
+	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+		return -EINVAL;
+
+	if (num_tc == net_dev->num_tc)
+		return 0;
+
+	for (tc = 0; tc < num_tc; tc++) {
+		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+	}
+
+	if (num_tc > net_dev->num_tc) {
+		/* Initialise high-priority queues as necessary */
+		efx_for_each_channel(channel, efx) {
+			efx_for_each_possible_channel_tx_queue(tx_queue,
+							       channel) {
+				if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
+					continue;
+				if (!tx_queue->buffer) {
+					rc = efx_probe_tx_queue(tx_queue);
+					if (rc)
+						return rc;
+				}
+				if (!tx_queue->initialised)
+					efx_init_tx_queue(tx_queue);
+				efx_init_tx_queue_core_txq(tx_queue);
+			}
+		}
+	} else {
+		/* Reduce number of classes before number of queues */
+		net_dev->num_tc = num_tc;
+	}
+
+	rc = netif_set_real_num_tx_queues(net_dev,
+					  max_t(int, num_tc, 1) *
+					  efx->n_tx_channels);
+	if (rc)
+		return rc;
+
+	/* Do not destroy high-priority queues when they become
+	 * unused.  We would have to flush them first, and it is
+	 * fairly difficult to flush a subset of TX queues.  Leave
+	 * it to efx_fini_channels().
+	 */
+
+	net_dev->num_tc = num_tc;
+	return 0;
+}
+
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
 	unsigned fill_level;
@@ -430,6 +504,8 @@
 
 	/* Set up TX descriptor ring */
 	efx_nic_init_tx(tx_queue);
+
+	tx_queue->initialised = true;
 }
 
 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -452,9 +528,14 @@
 
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	if (!tx_queue->initialised)
+		return;
+
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "shutting down TX queue %d\n", tx_queue->queue);
 
+	tx_queue->initialised = false;
+
 	/* Flush TX queue, remove descriptor ring */
 	efx_nic_fini_tx(tx_queue);
 
@@ -466,6 +547,9 @@
 
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
+	if (!tx_queue->buffer)
+		return;
+
 	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
 		  "destroying TX queue %d\n", tx_queue->queue);
 	efx_nic_remove_tx(tx_queue);
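
efx_hard_start_xmit() and efx_init_tx_queue_core_txq() have to agree on a single bijection between core TX queue numbers and (channel, priority) pairs: core queues 0..n_tx_channels-1 carry normal-priority traffic and n_tx_channels..2*n_tx_channels-1 the high-priority class. A tiny standalone model of the forward and inverse mapping, with the driver's constants reduced to plain parameters (hypothetical names, not sfc API):

	struct txq_id {
		unsigned int channel;	/* 0 .. n_chan-1 */
		unsigned int highpri;	/* 0 or 1 */
	};

	static inline unsigned int txq_to_core(struct txq_id q, unsigned int n_chan)
	{
		return q.channel + (q.highpri ? n_chan : 0);
	}

	static inline struct txq_id core_to_txq(unsigned int core, unsigned int n_chan)
	{
		struct txq_id q;

		q.highpri = core >= n_chan;
		q.channel = q.highpri ? core - n_chan : core;
		return q;
	}

Keeping the two directions in one pair of helpers is the point of the "Must be inverse of queue lookup" comment above: if either side changes, both must.
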
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 819c175..095e525 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -32,10 +32,17 @@
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
+#include <linux/ethtool.h>
 #include <asm/cacheflush.h>
 
 #include "sh_eth.h"
 
+#define SH_ETH_DEF_MSG_ENABLE \
+		(NETIF_MSG_LINK	| \
+		NETIF_MSG_TIMER	| \
+		NETIF_MSG_RX_ERR| \
+		NETIF_MSG_TX_ERR)
+
 /* There is CPU dependent code */
 #if defined(CONFIG_CPU_SUBTYPE_SH7724)
 #define SH_ETH_RESET_DEFAULT	1
@@ -817,6 +824,20 @@
 	return 0;
 }
 
+static void sh_eth_rcv_snd_disable(u32 ioaddr)
+{
+	/* disable tx and rx */
+	writel(readl(ioaddr + ECMR) &
+		~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
+}
+
+static void sh_eth_rcv_snd_enable(u32 ioaddr)
+{
+	/* enable tx and rx */
+	writel(readl(ioaddr + ECMR) |
+		(ECMR_RE | ECMR_TE), ioaddr + ECMR);
+}
+
 /* error control function */
 static void sh_eth_error(struct net_device *ndev, int intr_status)
 {
@@ -843,11 +864,9 @@
 				if (mdp->ether_link_active_low)
 					link_stat = ~link_stat;
 			}
-			if (!(link_stat & PHY_ST_LINK)) {
-				/* Link Down : disable tx and rx */
-				writel(readl(ioaddr + ECMR) &
-					  ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
-			} else {
+			if (!(link_stat & PHY_ST_LINK))
+				sh_eth_rcv_snd_disable(ioaddr);
+			else {
 				/* Link Up */
 				writel(readl(ioaddr + EESIPR) &
 					  ~DMAC_M_ECI, ioaddr + EESIPR);
@@ -857,8 +876,7 @@
 				writel(readl(ioaddr + EESIPR) |
 					  DMAC_M_ECI, ioaddr + EESIPR);
 				/* enable tx and rx */
-				writel(readl(ioaddr + ECMR) |
-					  (ECMR_RE | ECMR_TE), ioaddr + ECMR);
+				sh_eth_rcv_snd_enable(ioaddr);
 			}
 		}
 	}
@@ -867,6 +885,8 @@
 		/* Write buck end. unused write back interrupt */
 		if (intr_status & EESR_TABT)	/* Transmit Abort int */
 			mdp->stats.tx_aborted_errors++;
+			if (netif_msg_tx_err(mdp))
+				dev_err(&ndev->dev, "Transmit Abort\n");
 	}
 
 	if (intr_status & EESR_RABT) {
@@ -874,14 +894,23 @@
 		if (intr_status & EESR_RFRMER) {
 			/* Receive Frame Overflow int */
 			mdp->stats.rx_frame_errors++;
-			dev_err(&ndev->dev, "Receive Frame Overflow\n");
+			if (netif_msg_rx_err(mdp))
+				dev_err(&ndev->dev, "Receive Abort\n");
 		}
 	}
 
-	if (!mdp->cd->no_ade) {
-		if (intr_status & EESR_ADE && intr_status & EESR_TDE &&
-		    intr_status & EESR_TFE)
-			mdp->stats.tx_fifo_errors++;
+	if (intr_status & EESR_TDE) {
+		/* Transmit Descriptor Empty int */
+		mdp->stats.tx_fifo_errors++;
+		if (netif_msg_tx_err(mdp))
+			dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
+	}
+
+	if (intr_status & EESR_TFE) {
+		/* FIFO underflow */
+		mdp->stats.tx_fifo_errors++;
+		if (netif_msg_tx_err(mdp))
+			dev_err(&ndev->dev, "Transmit FIFO Underflow\n");
 	}
 
 	if (intr_status & EESR_RDE) {
@@ -890,12 +919,22 @@
 
 		if (readl(ioaddr + EDRRR) ^ EDRRR_R)
 			writel(EDRRR_R, ioaddr + EDRRR);
-		dev_err(&ndev->dev, "Receive Descriptor Empty\n");
+		if (netif_msg_rx_err(mdp))
+			dev_err(&ndev->dev, "Receive Descriptor Empty\n");
 	}
+
 	if (intr_status & EESR_RFE) {
 		/* Receive FIFO Overflow int */
 		mdp->stats.rx_fifo_errors++;
-		dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+		if (netif_msg_rx_err(mdp))
+			dev_err(&ndev->dev, "Receive FIFO Overflow\n");
+	}
+
+	if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
+		/* Address Error */
+		mdp->stats.tx_fifo_errors++;
+		if (netif_msg_tx_err(mdp))
+			dev_err(&ndev->dev, "Address Error\n");
 	}
 
 	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -1012,7 +1051,7 @@
 		mdp->duplex = -1;
 	}
 
-	if (new_state)
+	if (new_state && netif_msg_link(mdp))
 		phy_print_status(phydev);
 }
 
@@ -1063,6 +1102,132 @@
 	return 0;
 }
 
+static int sh_eth_get_settings(struct net_device *ndev,
+			struct ethtool_cmd *ecmd)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&mdp->lock, flags);
+	ret = phy_ethtool_gset(mdp->phydev, ecmd);
+	spin_unlock_irqrestore(&mdp->lock, flags);
+
+	return ret;
+}
+
+static int sh_eth_set_settings(struct net_device *ndev,
+		struct ethtool_cmd *ecmd)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	unsigned long flags;
+	int ret;
+	u32 ioaddr = ndev->base_addr;
+
+	spin_lock_irqsave(&mdp->lock, flags);
+
+	/* disable tx and rx */
+	sh_eth_rcv_snd_disable(ioaddr);
+
+	ret = phy_ethtool_sset(mdp->phydev, ecmd);
+	if (ret)
+		goto error_exit;
+
+	if (ecmd->duplex == DUPLEX_FULL)
+		mdp->duplex = 1;
+	else
+		mdp->duplex = 0;
+
+	if (mdp->cd->set_duplex)
+		mdp->cd->set_duplex(ndev);
+
+error_exit:
+	mdelay(1);
+
+	/* enable tx and rx */
+	sh_eth_rcv_snd_enable(ioaddr);
+
+	spin_unlock_irqrestore(&mdp->lock, flags);
+
+	return ret;
+}
+
+static int sh_eth_nway_reset(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&mdp->lock, flags);
+	ret = phy_start_aneg(mdp->phydev);
+	spin_unlock_irqrestore(&mdp->lock, flags);
+
+	return ret;
+}
+
+static u32 sh_eth_get_msglevel(struct net_device *ndev)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	return mdp->msg_enable;
+}
+
+static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	mdp->msg_enable = value;
+}
+
+static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"rx_current", "tx_current",
+	"rx_dirty", "tx_dirty",
+};
+#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)
+
+static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return SH_ETH_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void sh_eth_get_ethtool_stats(struct net_device *ndev,
+			struct ethtool_stats *stats, u64 *data)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int i = 0;
+
+	/* device-specific stats */
+	data[i++] = mdp->cur_rx;
+	data[i++] = mdp->cur_tx;
+	data[i++] = mdp->dirty_rx;
+	data[i++] = mdp->dirty_tx;
+}
+
+static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(data, *sh_eth_gstrings_stats,
+					sizeof(sh_eth_gstrings_stats));
+		break;
+	}
+}
+
+static const struct ethtool_ops sh_eth_ethtool_ops = {
+	.get_settings	= sh_eth_get_settings,
+	.set_settings	= sh_eth_set_settings,
+	.nway_reset		= sh_eth_nway_reset,
+	.get_msglevel	= sh_eth_get_msglevel,
+	.set_msglevel	= sh_eth_set_msglevel,
+	.get_link		= ethtool_op_get_link,
+	.get_strings	= sh_eth_get_strings,
+	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
+	.get_sset_count     = sh_eth_get_sset_count,
+};
+
 /* network device open function */
 static int sh_eth_open(struct net_device *ndev)
 {
@@ -1073,8 +1238,8 @@
 
 	ret = request_irq(ndev->irq, sh_eth_interrupt,
 #if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7764) || \
-    defined(CONFIG_CPU_SUBTYPE_SH7757)
+	defined(CONFIG_CPU_SUBTYPE_SH7764) || \
+	defined(CONFIG_CPU_SUBTYPE_SH7757)
 				IRQF_SHARED,
 #else
 				0,
@@ -1123,8 +1288,8 @@
 
 	netif_stop_queue(ndev);
 
-	/* worning message out. */
-	printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
+	if (netif_msg_timer(mdp))
+		dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
 	       " resetting...\n", ndev->name, (int)readl(ioaddr + EESR));
 
 	/* tx_errors count up */
@@ -1167,6 +1332,8 @@
 	spin_lock_irqsave(&mdp->lock, flags);
 	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
 		if (!sh_eth_txfree(ndev)) {
+			if (netif_msg_tx_queued(mdp))
+				dev_warn(&ndev->dev, "TxFD exhausted.\n");
 			netif_stop_queue(ndev);
 			spin_unlock_irqrestore(&mdp->lock, flags);
 			return NETDEV_TX_BUSY;
@@ -1497,8 +1664,11 @@
 
 	/* set function */
 	ndev->netdev_ops = &sh_eth_netdev_ops;
+	SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
 	ndev->watchdog_timeo = TX_TIMEOUT;
 
+	/* debug message level */
+	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
 	mdp->post_rx = POST_RX >> (devno << 1);
 	mdp->post_fw = POST_FW >> (devno << 1);
 
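
The sh_eth changes above route their error prints through a per-device msg_enable word that ethtool exposes via get_msglevel/set_msglevel.  A minimal sketch of the same gating pattern, with hypothetical names (my_priv, my_get_msglevel, my_set_msglevel, my_handle_rx_error), assuming the private struct carries the msg_enable field the netif_msg_*() helpers expect:

/*
 * Minimal sketch of the msg_enable pattern used above; all names are
 * hypothetical.
 */
#include <linux/device.h>
#include <linux/netdevice.h>

struct my_priv {
	u32 msg_enable;		/* NETIF_MSG_* bits, visible to ethtool */
};

static u32 my_get_msglevel(struct net_device *ndev)
{
	struct my_priv *priv = netdev_priv(ndev);

	return priv->msg_enable;
}

static void my_set_msglevel(struct net_device *ndev, u32 value)
{
	struct my_priv *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}

static void my_handle_rx_error(struct net_device *ndev)
{
	struct my_priv *priv = netdev_priv(ndev);

	/* Only print when the user asked for RX error messages */
	if (netif_msg_rx_err(priv))
		dev_err(&ndev->dev, "Receive FIFO Overflow\n");
}

With this in place the verbosity can typically be adjusted at run time, for example with "ethtool -s ethX msglvl N".
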
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 5976d1d..640e368 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1777,6 +1777,7 @@
 					      "cur_rx:%4.4d, dirty_rx:%4.4d\n",
 					      net_dev->name, sis_priv->cur_rx,
 					      sis_priv->dirty_rx);
+				dev_kfree_skb(skb);
 				break;
 			}
 
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 726df61..43654a3 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -81,6 +81,7 @@
 #include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/workqueue.h>
+#include <linux/of.h>
 
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -2394,6 +2395,15 @@
 	return 0;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id smc91x_match[] = {
+	{ .compatible = "smsc,lan91c94", },
+	{ .compatible = "smsc,lan91c111", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, smc91x_match);
+#endif
+
 static struct dev_pm_ops smc_drv_pm_ops = {
 	.suspend	= smc_drv_suspend,
 	.resume		= smc_drv_resume,
@@ -2406,6 +2416,9 @@
 		.name	= CARDNAME,
 		.owner	= THIS_MODULE,
 		.pm	= &smc_drv_pm_ops,
+#ifdef CONFIG_OF
+		.of_match_table = smc91x_match,
+#endif
 	},
 };
 
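
The smc91x hunk above adds an of_device_id table and hooks it up through of_match_table so the platform driver can be bound from a device-tree compatible string.  A minimal sketch of that wiring for a hypothetical driver; my_of_match, my_platform_driver and "vendor,my-device" are placeholders:

/*
 * Minimal sketch of device-tree matching for a platform driver; all
 * names and the compatible string are placeholders.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#ifdef CONFIG_OF
static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,my-device", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, my_of_match);
#endif

static struct platform_driver my_platform_driver = {
	.driver = {
		.name		= "my-device",
		.owner		= THIS_MODULE,
#ifdef CONFIG_OF
		.of_match_table	= my_of_match,
#endif
	},
};
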
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 34a0af3..0e5f031 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1560,8 +1560,10 @@
 
 	priv->hw = device;
 
-	if (device_can_wakeup(priv->device))
+	if (device_can_wakeup(priv->device)) {
 		priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+		enable_irq_wake(dev->irq);
+	}
 
 	return 0;
 }
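
The stmmac change above arms the interrupt line as a wakeup source when the device is wakeup-capable.  A minimal sketch of the usual pairing between the device wakeup policy and the IRQ wake flag at suspend time; my_suspend and its arguments are hypothetical:

/*
 * Minimal sketch: arm or disarm IRQ wake according to the device wakeup
 * policy at suspend time.  my_suspend is a hypothetical helper.
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>

static int my_suspend(struct device *dev, struct net_device *ndev)
{
	/* Let the IRQ wake the system only if wake-on-LAN is wanted */
	if (device_may_wakeup(dev))
		enable_irq_wake(ndev->irq);
	else
		disable_irq_wake(ndev->irq);

	return 0;
}
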
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 1c5408f..c1a3448 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -320,28 +320,28 @@
 
 	if (txmac_stat & MAC_TXSTAT_URUN) {
 		netdev_err(dev, "TX MAC xmit underrun\n");
-		gp->net_stats.tx_fifo_errors++;
+		dev->stats.tx_fifo_errors++;
 	}
 
 	if (txmac_stat & MAC_TXSTAT_MPE) {
 		netdev_err(dev, "TX MAC max packet size error\n");
-		gp->net_stats.tx_errors++;
+		dev->stats.tx_errors++;
 	}
 
 	/* The rest are all cases of one of the 16-bit TX
 	 * counters expiring.
 	 */
 	if (txmac_stat & MAC_TXSTAT_NCE)
-		gp->net_stats.collisions += 0x10000;
+		dev->stats.collisions += 0x10000;
 
 	if (txmac_stat & MAC_TXSTAT_ECE) {
-		gp->net_stats.tx_aborted_errors += 0x10000;
-		gp->net_stats.collisions += 0x10000;
+		dev->stats.tx_aborted_errors += 0x10000;
+		dev->stats.collisions += 0x10000;
 	}
 
 	if (txmac_stat & MAC_TXSTAT_LCE) {
-		gp->net_stats.tx_aborted_errors += 0x10000;
-		gp->net_stats.collisions += 0x10000;
+		dev->stats.tx_aborted_errors += 0x10000;
+		dev->stats.collisions += 0x10000;
 	}
 
 	/* We do not keep track of MAC_TXSTAT_FCE and
@@ -469,20 +469,20 @@
 		u32 smac = readl(gp->regs + MAC_SMACHINE);
 
 		netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
-		gp->net_stats.rx_over_errors++;
-		gp->net_stats.rx_fifo_errors++;
+		dev->stats.rx_over_errors++;
+		dev->stats.rx_fifo_errors++;
 
 		ret = gem_rxmac_reset(gp);
 	}
 
 	if (rxmac_stat & MAC_RXSTAT_ACE)
-		gp->net_stats.rx_frame_errors += 0x10000;
+		dev->stats.rx_frame_errors += 0x10000;
 
 	if (rxmac_stat & MAC_RXSTAT_CCE)
-		gp->net_stats.rx_crc_errors += 0x10000;
+		dev->stats.rx_crc_errors += 0x10000;
 
 	if (rxmac_stat & MAC_RXSTAT_LCE)
-		gp->net_stats.rx_length_errors += 0x10000;
+		dev->stats.rx_length_errors += 0x10000;
 
 	/* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE
 	 * events.
@@ -594,7 +594,7 @@
 		if (netif_msg_rx_err(gp))
 			printk(KERN_DEBUG "%s: no buffer for rx frame\n",
 				gp->dev->name);
-		gp->net_stats.rx_dropped++;
+		dev->stats.rx_dropped++;
 	}
 
 	if (gem_status & GREG_STAT_RXTAGERR) {
@@ -602,7 +602,7 @@
 		if (netif_msg_rx_err(gp))
 			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
 				gp->dev->name);
-		gp->net_stats.rx_errors++;
+		dev->stats.rx_errors++;
 
 		goto do_reset;
 	}
@@ -684,7 +684,7 @@
 				break;
 		}
 		gp->tx_skbs[entry] = NULL;
-		gp->net_stats.tx_bytes += skb->len;
+		dev->stats.tx_bytes += skb->len;
 
 		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
 			txd = &gp->init_block->txd[entry];
@@ -696,7 +696,7 @@
 			entry = NEXT_TX(entry);
 		}
 
-		gp->net_stats.tx_packets++;
+		dev->stats.tx_packets++;
 		dev_kfree_skb_irq(skb);
 	}
 	gp->tx_old = entry;
@@ -738,6 +738,7 @@
 
 static int gem_rx(struct gem *gp, int work_to_do)
 {
+	struct net_device *dev = gp->dev;
 	int entry, drops, work_done = 0;
 	u32 done;
 	__sum16 csum;
@@ -782,15 +783,15 @@
 
 		len = (status & RXDCTRL_BUFSZ) >> 16;
 		if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) {
-			gp->net_stats.rx_errors++;
+			dev->stats.rx_errors++;
 			if (len < ETH_ZLEN)
-				gp->net_stats.rx_length_errors++;
+				dev->stats.rx_length_errors++;
 			if (len & RXDCTRL_BAD)
-				gp->net_stats.rx_crc_errors++;
+				dev->stats.rx_crc_errors++;
 
 			/* We'll just return it to GEM. */
 		drop_it:
-			gp->net_stats.rx_dropped++;
+			dev->stats.rx_dropped++;
 			goto next;
 		}
 
@@ -843,8 +844,8 @@
 
 		netif_receive_skb(skb);
 
-		gp->net_stats.rx_packets++;
-		gp->net_stats.rx_bytes += len;
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += len;
 
 	next:
 		entry = NEXT_RX(entry);
@@ -2472,7 +2473,6 @@
 static struct net_device_stats *gem_get_stats(struct net_device *dev)
 {
 	struct gem *gp = netdev_priv(dev);
-	struct net_device_stats *stats = &gp->net_stats;
 
 	spin_lock_irq(&gp->lock);
 	spin_lock(&gp->tx_lock);
@@ -2481,17 +2481,17 @@
 	 * so we shield against this
 	 */
 	if (gp->running) {
-		stats->rx_crc_errors += readl(gp->regs + MAC_FCSERR);
+		dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR);
 		writel(0, gp->regs + MAC_FCSERR);
 
-		stats->rx_frame_errors += readl(gp->regs + MAC_AERR);
+		dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR);
 		writel(0, gp->regs + MAC_AERR);
 
-		stats->rx_length_errors += readl(gp->regs + MAC_LERR);
+		dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR);
 		writel(0, gp->regs + MAC_LERR);
 
-		stats->tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
-		stats->collisions +=
+		dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL);
+		dev->stats.collisions +=
 			(readl(gp->regs + MAC_ECOLL) +
 			 readl(gp->regs + MAC_LCOLL));
 		writel(0, gp->regs + MAC_ECOLL);
@@ -2501,7 +2501,7 @@
 	spin_unlock(&gp->tx_lock);
 	spin_unlock_irq(&gp->lock);
 
-	return &gp->net_stats;
+	return &dev->stats;
 }
 
 static int gem_set_mac_address(struct net_device *dev, void *addr)
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h
index 1990546..ede0178 100644
--- a/drivers/net/sungem.h
+++ b/drivers/net/sungem.h
@@ -994,7 +994,6 @@
 	u32			status;
 
 	struct napi_struct	napi;
-	struct net_device_stats net_stats;
 
 	int			tx_fifo_sz;
 	int			rx_fifo_sz;
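
The sungem conversion above drops the driver-private net_device_stats copy and counts directly into the statistics embedded in struct net_device, so the get_stats hook can simply return &dev->stats.  A minimal sketch of that shape with hypothetical helpers (my_get_stats, my_count_rx_error):

/*
 * Minimal sketch of counting into the statistics embedded in
 * struct net_device; both helpers are hypothetical.
 */
#include <linux/netdevice.h>

static void my_count_rx_error(struct net_device *dev, unsigned int len)
{
	dev->stats.rx_errors++;
	if (len < ETH_ZLEN)
		dev->stats.rx_length_errors++;
}

static struct net_device_stats *my_get_stats(struct net_device *dev)
{
	/* Hot paths already bump dev->stats, nothing to copy here */
	return &dev->stats;
}
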
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7841a8f..6be4185 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -4,7 +4,7 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2010 Broadcom Corporation.
+ * Copyright (C) 2005-2011 Broadcom Corporation.
  *
  * Firmware is:
  *	Derived from proprietary unpublished source code,
@@ -60,20 +60,14 @@
 #define BAR_0	0
 #define BAR_2	2
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define TG3_VLAN_TAG_USED 1
-#else
-#define TG3_VLAN_TAG_USED 0
-#endif
-
 #include "tg3.h"
 
 #define DRV_MODULE_NAME		"tg3"
 #define TG3_MAJ_NUM			3
-#define TG3_MIN_NUM			116
+#define TG3_MIN_NUM			117
 #define DRV_MODULE_VERSION	\
 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE	"December 3, 2010"
+#define DRV_MODULE_RELDATE	"January 25, 2011"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -134,9 +128,6 @@
 				 TG3_TX_RING_SIZE)
 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
 
-#define TG3_RX_DMA_ALIGN		16
-#define TG3_RX_HEADROOM			ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
-
 #define TG3_DMA_BYTE_ENAB		64
 
 #define TG3_RX_STD_DMA_SZ		1536
@@ -1785,9 +1776,29 @@
 		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
 				  TG3_CL45_D7_EEERES_STAT, &val);
 
-		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
-		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
+		switch (val) {
+		case TG3_CL45_D7_EEERES_STAT_LP_1000T:
+			switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+			case ASIC_REV_5717:
+			case ASIC_REV_5719:
+			case ASIC_REV_57765:
+				/* Enable SM_DSP clock and tx 6dB coding. */
+				val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+				      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
+				      MII_TG3_AUXCTL_ACTL_TX_6DB;
+				tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+
+				tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
+
+				/* Turn off SM_DSP clock. */
+				val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
+				      MII_TG3_AUXCTL_ACTL_TX_6DB;
+				tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+			}
+			/* Fallthrough */
+		case TG3_CL45_D7_EEERES_STAT_LP_100TX:
 			tp->setlpicnt = 2;
+		}
 	}
 
 	if (!tp->setlpicnt) {
@@ -2977,11 +2988,19 @@
 		      MII_TG3_AUXCTL_ACTL_TX_6DB;
 		tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
 
-		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
-		    !tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
-			tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2,
-					 val | MII_TG3_DSP_CH34TP2_HIBW01);
+		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+		case ASIC_REV_5717:
+		case ASIC_REV_57765:
+			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
+				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
+						 MII_TG3_DSP_CH34TP2_HIBW01);
+			/* Fall through */
+		case ASIC_REV_5719:
+			val = MII_TG3_DSP_TAP26_ALNOKO |
+			      MII_TG3_DSP_TAP26_RMRXSTO |
+			      MII_TG3_DSP_TAP26_OPCSINPT;
+			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+		}
 
 		val = 0;
 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
@@ -4722,8 +4741,6 @@
 		struct sk_buff *skb;
 		dma_addr_t dma_addr;
 		u32 opaque_key, desc_idx, *post_ptr;
-		bool hw_vlan __maybe_unused = false;
-		u16 vtag __maybe_unused = 0;
 
 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
@@ -4782,12 +4799,12 @@
 			tg3_recycle_rx(tnapi, tpr, opaque_key,
 				       desc_idx, *post_ptr);
 
-			copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
+			copy_skb = netdev_alloc_skb(tp->dev, len +
 						    TG3_RAW_IP_ALIGN);
 			if (copy_skb == NULL)
 				goto drop_it_no_recycle;
 
-			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
+			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
 			skb_put(copy_skb, len);
 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
 			skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4814,30 +4831,11 @@
 		}
 
 		if (desc->type_flags & RXD_FLAG_VLAN &&
-		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
-			vtag = desc->err_vlan & RXD_VLAN_MASK;
-#if TG3_VLAN_TAG_USED
-			if (tp->vlgrp)
-				hw_vlan = true;
-			else
-#endif
-			{
-				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
-						    __skb_push(skb, VLAN_HLEN);
+		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
+			__vlan_hwaccel_put_tag(skb,
+					       desc->err_vlan & RXD_VLAN_MASK);
 
-				memmove(ve, skb->data + VLAN_HLEN,
-					ETH_ALEN * 2);
-				ve->h_vlan_proto = htons(ETH_P_8021Q);
-				ve->h_vlan_TCI = htons(vtag);
-			}
-		}
-
-#if TG3_VLAN_TAG_USED
-		if (hw_vlan)
-			vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
-		else
-#endif
-			napi_gro_receive(&tnapi->napi, skb);
+		napi_gro_receive(&tnapi->napi, skb);
 
 		received++;
 		budget--;
@@ -5740,11 +5738,9 @@
 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
 	}
 
-#if TG3_VLAN_TAG_USED
 	if (vlan_tx_tag_present(skb))
 		base_flags |= (TXD_FLAG_VLAN |
 			       (vlan_tx_tag_get(skb) << 16));
-#endif
 
 	len = skb_headlen(skb);
 
@@ -5986,11 +5982,10 @@
 			}
 		}
 	}
-#if TG3_VLAN_TAG_USED
+
 	if (vlan_tx_tag_present(skb))
 		base_flags |= (TXD_FLAG_VLAN |
 			       (vlan_tx_tag_get(skb) << 16));
-#endif
 
 	if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
@@ -7834,7 +7829,7 @@
 		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);
 
 		tw32_f(TG3_CPMU_EEE_DBTMR2,
-		       TG3_CPMU_DBTMR1_APE_TX_2047US |
+		       TG3_CPMU_DBTMR2_APE_TX_2047US |
 		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
 	}
 
@@ -8108,8 +8103,9 @@
 	/* Program the jumbo buffer descriptor ring control
 	 * blocks on those devices that have them.
 	 */
-	if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
-	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+	    ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
+	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
 		/* Setup replenish threshold. */
 		tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
 
@@ -8227,8 +8223,12 @@
 	    (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
 		val = tr32(TG3_RDMA_RSRVCTRL_REG);
 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
-			val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK;
-			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B;
+			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
+				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
+				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
+			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
+			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
+			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
 		}
 		tw32(TG3_RDMA_RSRVCTRL_REG,
 		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
@@ -8350,7 +8350,8 @@
 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
 	udelay(100);
 
-	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
+	if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
+		tp->irq_cnt > 1) {
 		val = tr32(MSGINT_MODE);
 		val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
 		tw32(MSGINT_MODE, val);
@@ -9090,7 +9091,8 @@
 
 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
 		u32 msi_mode = tr32(MSGINT_MODE);
-		if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+		if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
+		    tp->irq_cnt > 1)
 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
 	}
@@ -9532,17 +9534,10 @@
 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
 				  RX_MODE_KEEP_VLAN_TAG);
 
+#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
 	 * flag clear.
 	 */
-#if TG3_VLAN_TAG_USED
-	if (!tp->vlgrp &&
-	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
-		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
-#else
-	/* By definition, VLAN is disabled always in this
-	 * case.
-	 */
 	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
 #endif
@@ -10873,13 +10868,16 @@
 	if (loopback_mode == TG3_MAC_LOOPBACK) {
 		/* HW errata - mac loopback fails in some cases on 5780.
 		 * Normal traffic and PHY loopback are not affected by
-		 * errata.
+		 * errata.  Also, the MAC loopback test is deprecated for
+		 * all newer ASIC revisions.
 		 */
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
+		    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
 			return 0;
 
-		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
-			   MAC_MODE_PORT_INT_LPBACK;
+		mac_mode = tp->mac_mode &
+			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+		mac_mode |= MAC_MODE_PORT_INT_LPBACK;
 		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
 			mac_mode |= MAC_MODE_LINK_POLARITY;
 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
@@ -10901,7 +10899,8 @@
 		tg3_writephy(tp, MII_BMCR, val);
 		udelay(40);
 
-		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
+		mac_mode = tp->mac_mode &
+			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
 			tg3_writephy(tp, MII_TG3_FET_PTEST,
 				     MII_TG3_FET_PTEST_FRC_TX_LINK |
@@ -10929,6 +10928,13 @@
 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
 		}
 		tw32(MAC_MODE, mac_mode);
+
+		/* Wait for link */
+		for (i = 0; i < 100; i++) {
+			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+				break;
+			mdelay(1);
+		}
 	} else {
 		return -EINVAL;
 	}
@@ -11035,14 +11041,19 @@
 static int tg3_test_loopback(struct tg3 *tp)
 {
 	int err = 0;
-	u32 cpmuctrl = 0;
+	u32 eee_cap, cpmuctrl = 0;
 
 	if (!netif_running(tp->dev))
 		return TG3_LOOPBACK_FAILED;
 
+	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
+	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+
 	err = tg3_reset_hw(tp, 1);
-	if (err)
-		return TG3_LOOPBACK_FAILED;
+	if (err) {
+		err = TG3_LOOPBACK_FAILED;
+		goto done;
+	}
 
 	/* Turn off gphy autopowerdown. */
 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
@@ -11062,8 +11073,10 @@
 			udelay(10);
 		}
 
-		if (status != CPMU_MUTEX_GNT_DRIVER)
-			return TG3_LOOPBACK_FAILED;
+		if (status != CPMU_MUTEX_GNT_DRIVER) {
+			err = TG3_LOOPBACK_FAILED;
+			goto done;
+		}
 
 		/* Turn off link-based power management. */
 		cpmuctrl = tr32(TG3_CPMU_CTRL);
@@ -11092,6 +11105,9 @@
 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
 		tg3_phy_toggle_apd(tp, true);
 
+done:
+	tp->phy_flags |= eee_cap;
+
 	return err;
 }
 
@@ -11198,7 +11214,9 @@
 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 			break;			/* We have no PHY */
 
-		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+		     !netif_running(dev)))
 			return -EAGAIN;
 
 		spin_lock_bh(&tp->lock);
@@ -11214,7 +11232,9 @@
 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
 			break;			/* We have no PHY */
 
-		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
+		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+		     !netif_running(dev)))
 			return -EAGAIN;
 
 		spin_lock_bh(&tp->lock);
@@ -11230,31 +11250,6 @@
 	return -EOPNOTSUPP;
 }
 
-#if TG3_VLAN_TAG_USED
-static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
-	struct tg3 *tp = netdev_priv(dev);
-
-	if (!netif_running(dev)) {
-		tp->vlgrp = grp;
-		return;
-	}
-
-	tg3_netif_stop(tp);
-
-	tg3_full_lock(tp, 0);
-
-	tp->vlgrp = grp;
-
-	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
-	__tg3_set_rx_mode(dev);
-
-	tg3_netif_start(tp);
-
-	tg3_full_unlock(tp);
-}
-#endif
-
 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
 {
 	struct tg3 *tp = netdev_priv(dev);
@@ -12468,9 +12463,11 @@
 			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
 	}
 done:
-	device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
-	device_set_wakeup_enable(&tp->pdev->dev,
+	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
+		device_set_wakeup_enable(&tp->pdev->dev,
 				 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
+	else
+		device_set_wakeup_capable(&tp->pdev->dev, false);
 }
 
 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
@@ -13066,9 +13063,7 @@
 
 static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
 {
-#if TG3_VLAN_TAG_USED
 	dev->vlan_features |= flags;
-#endif
 }
 
 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
@@ -13325,7 +13320,9 @@
 	}
 
 	/* Determine TSO capabilities */
-	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+		; /* Do nothing. HW bug. */
+	else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
 		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
 	else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13376,7 +13373,8 @@
 		tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
 	}
 
-	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
+	if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
 		tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
 
 	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
@@ -13394,42 +13392,8 @@
 		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
 
 		tp->pcie_readrq = 4096;
-		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
-			u16 word;
-
-			pci_read_config_word(tp->pdev,
-					     tp->pcie_cap + PCI_EXP_LNKSTA,
-					     &word);
-			switch (word & PCI_EXP_LNKSTA_CLS) {
-			case PCI_EXP_LNKSTA_CLS_2_5GB:
-				word &= PCI_EXP_LNKSTA_NLW;
-				word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
-				switch (word) {
-				case 2:
-					tp->pcie_readrq = 2048;
-					break;
-				case 4:
-					tp->pcie_readrq = 1024;
-					break;
-				}
-				break;
-
-			case PCI_EXP_LNKSTA_CLS_5_0GB:
-				word &= PCI_EXP_LNKSTA_NLW;
-				word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
-				switch (word) {
-				case 1:
-					tp->pcie_readrq = 2048;
-					break;
-				case 2:
-					tp->pcie_readrq = 1024;
-					break;
-				case 4:
-					tp->pcie_readrq = 512;
-					break;
-				}
-			}
-		}
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+			tp->pcie_readrq = 2048;
 
 		pcie_set_readrq(tp->pdev, tp->pcie_readrq);
 
@@ -13861,11 +13825,11 @@
 	else
 		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
 
-	tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
+	tp->rx_offset = NET_IP_ALIGN;
 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
 	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
-		tp->rx_offset -= NET_IP_ALIGN;
+		tp->rx_offset = 0;
 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 		tp->rx_copy_thresh = ~(u16)0;
 #endif
@@ -14629,9 +14593,6 @@
 	.ndo_do_ioctl		= tg3_ioctl,
 	.ndo_tx_timeout		= tg3_tx_timeout,
 	.ndo_change_mtu		= tg3_change_mtu,
-#if TG3_VLAN_TAG_USED
-	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= tg3_poll_controller,
 #endif
@@ -14648,9 +14609,6 @@
 	.ndo_do_ioctl		= tg3_ioctl,
 	.ndo_tx_timeout		= tg3_tx_timeout,
 	.ndo_change_mtu		= tg3_change_mtu,
-#if TG3_VLAN_TAG_USED
-	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= tg3_poll_controller,
 #endif
@@ -14700,9 +14658,7 @@
 
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-#if TG3_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-#endif
 
 	tp = netdev_priv(dev);
 	tp->pdev = pdev;
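
The tg3 rework above removes the driver-private vlan_group handling; the receive path now records the tag on the skb with __vlan_hwaccel_put_tag() and hands the packet to napi_gro_receive(), leaving VLAN acceleration to the core.  A minimal sketch of that receive-side pattern, assuming the two-argument __vlan_hwaccel_put_tag() of this kernel generation; my_rx_one is a placeholder:

/*
 * Minimal sketch of the accelerated VLAN receive path; my_rx_one is
 * hypothetical.
 */
#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static void my_rx_one(struct napi_struct *napi, struct sk_buff *skb,
		      bool tagged, u16 vlan_tci)
{
	if (tagged)
		__vlan_hwaccel_put_tag(skb, vlan_tci);

	/* The core strips or delivers the tag from here on */
	napi_gro_receive(napi, skb);
}
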
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index d62c8d9..73884b6 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -4,7 +4,7 @@
  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
  * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
  * Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2010 Broadcom Corporation.
+ * Copyright (C) 2007-2011 Broadcom Corporation.
  */
 
 #ifndef _T3_H
@@ -141,6 +141,7 @@
 #define  CHIPREV_ID_57780_A1		 0x57780001
 #define  CHIPREV_ID_5717_A0		 0x05717000
 #define  CHIPREV_ID_57765_A0		 0x57785000
+#define  CHIPREV_ID_5719_A0		 0x05719000
 #define  GET_ASIC_REV(CHIP_REV_ID)	((CHIP_REV_ID) >> 12)
 #define   ASIC_REV_5700			 0x07
 #define   ASIC_REV_5701			 0x00
@@ -1105,7 +1106,7 @@
 #define  TG3_CPMU_DBTMR1_PCIEXIT_2047US	 0x07ff0000
 #define  TG3_CPMU_DBTMR1_LNKIDLE_2047US	 0x000070ff
 #define TG3_CPMU_EEE_DBTMR2		0x000036b8
-#define  TG3_CPMU_DBTMR1_APE_TX_2047US	 0x07ff0000
+#define  TG3_CPMU_DBTMR2_APE_TX_2047US	 0x07ff0000
 #define  TG3_CPMU_DBTMR2_TXIDXEQ_2047US	 0x000070ff
 #define TG3_CPMU_EEE_LNKIDL_CTRL	0x000036bc
 #define  TG3_CPMU_EEE_LNKIDL_PCIE_NL0	 0x01000000
@@ -1333,6 +1334,10 @@
 
 #define TG3_RDMA_RSRVCTRL_REG		0x00004900
 #define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX	 0x00000004
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K	 0x00000c00
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK	 0x00000ff0
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K	 0x000c0000
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK	 0x000ff000
 #define TG3_RDMA_RSRVCTRL_TXMRGN_320B	 0x28000000
 #define TG3_RDMA_RSRVCTRL_TXMRGN_MASK	 0xffe00000
 /* 0x4904 --> 0x4910 unused */
@@ -2108,6 +2113,10 @@
 
 #define MII_TG3_DSP_TAP1		0x0001
 #define  MII_TG3_DSP_TAP1_AGCTGT_DFLT	0x0007
+#define MII_TG3_DSP_TAP26		0x001a
+#define  MII_TG3_DSP_TAP26_ALNOKO	0x0001
+#define  MII_TG3_DSP_TAP26_RMRXSTO	0x0002
+#define  MII_TG3_DSP_TAP26_OPCSINPT	0x0004
 #define MII_TG3_DSP_AADJ1CH0		0x001f
 #define MII_TG3_DSP_CH34TP2		0x4022
 #define MII_TG3_DSP_CH34TP2_HIBW01	0x0010
@@ -2808,9 +2817,6 @@
 	u32				rx_std_max_post;
 	u32				rx_offset;
 	u32				rx_pkt_map_sz;
-#if TG3_VLAN_TAG_USED
-	struct vlan_group		*vlgrp;
-#endif
 
 
 	/* begin "everything else" cacheline(s) section */
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index f8e463c..e48a808 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -63,45 +63,45 @@
  *			     - Other minor stuff
  *
  *	v1.4 Feb 10, 2000    - Updated with more changes required after Dave's
- *	                       network cleanup in 2.3.43pre7 (Tigran & myself)
- *	                     - Minor stuff.
+ *			       network cleanup in 2.3.43pre7 (Tigran & myself)
+ *			     - Minor stuff.
  *
- *	v1.5 March 22, 2000  - Fixed another timer bug that would hang the driver
- *			       if no cable/link were present.
+ *	v1.5 March 22, 2000  - Fixed another timer bug that would hang the
+ *			       driver if no cable/link were present.
  *			     - Cosmetic changes.
  *			     - TODO: Port completely to new PCI/DMA API
- *			     	     Auto-Neg fallback.
+ *				     Auto-Neg fallback.
  *
- * 	v1.6 April 04, 2000  - Fixed driver support for kernel-parameters. Haven't
- * 			       tested it though, as the kernel support is currently
- * 			       broken (2.3.99p4p3).
- * 			     - Updated tlan.txt accordingly.
- * 			     - Adjusted minimum/maximum frame length.
- * 			     - There is now a TLAN website up at
- * 			       http://hp.sourceforge.net/ 
+ *	v1.6 April 04, 2000  - Fixed driver support for kernel-parameters.
+ *			       Haven't tested it though, as the kernel support
+ *			       is currently broken (2.3.99p4p3).
+ *			     - Updated tlan.txt accordingly.
+ *			     - Adjusted minimum/maximum frame length.
+ *			     - There is now a TLAN website up at
+ *			       http://hp.sourceforge.net/
  *
- * 	v1.7 April 07, 2000  - Started to implement custom ioctls. Driver now
- * 			       reports PHY information when used with Donald
- * 			       Beckers userspace MII diagnostics utility.
+ *	v1.7 April 07, 2000  - Started to implement custom ioctls. Driver now
+ *			       reports PHY information when used with Donald
+ *			       Beckers userspace MII diagnostics utility.
  *
- * 	v1.8 April 23, 2000  - Fixed support for forced speed/duplex settings.
- * 			     - Added link information to Auto-Neg and forced
- * 			       modes. When NIC operates with auto-neg the driver
- * 			       will report Link speed & duplex modes as well as
- * 			       link partner abilities. When forced link is used,
- * 			       the driver will report status of the established
- * 			       link.
- * 			       Please read tlan.txt for additional information.
- * 			     - Removed call to check_region(), and used
- * 			       return value of request_region() instead.
+ *	v1.8 April 23, 2000  - Fixed support for forced speed/duplex settings.
+ *			     - Added link information to Auto-Neg and forced
+ *			       modes. When NIC operates with auto-neg the driver
+ *			       will report Link speed & duplex modes as well as
+ *			       link partner abilities. When forced link is used,
+ *			       the driver will report status of the established
+ *			       link.
+ *			       Please read tlan.txt for additional information.
+ *			     - Removed call to check_region(), and used
+ *			       return value of request_region() instead.
  *
  *	v1.8a May 28, 2000   - Minor updates.
  *
  *	v1.9 July 25, 2000   - Fixed a few remaining Full-Duplex issues.
- *	                     - Updated with timer fixes from Andrew Morton.
- *	                     - Fixed module race in TLan_Open.
- *	                     - Added routine to monitor PHY status.
- *	                     - Added activity led support for Proliant devices.
+ *			     - Updated with timer fixes from Andrew Morton.
+ *			     - Fixed module race in TLan_Open.
+ *			     - Added routine to monitor PHY status.
+ *			     - Added activity led support for Proliant devices.
  *
  *	v1.10 Aug 30, 2000   - Added support for EISA based tlan controllers
  *			       like the Compaq NetFlex3/E.
@@ -111,8 +111,8 @@
  *			       hardware probe is done with kernel API and
  *			       TLan_EisaProbe.
  *			     - Adjusted debug information for probing.
- *			     - Fixed bug that would cause general debug information
- *			       to be printed after driver removal.
+ *			     - Fixed bug that would cause general debug
+ *			       information to be printed after driver removal.
  *			     - Added transmit timeout handling.
  *			     - Fixed OOM return values in tlan_probe.
  *			     - Fixed possible mem leak in tlan_exit
@@ -136,8 +136,8 @@
  *
  *	v1.12 Oct 12, 2000   - Minor fixes (memleak, init, etc.)
  *
- * 	v1.13 Nov 28, 2000   - Stop flooding console with auto-neg issues
- * 			       when link can't be established.
+ *	v1.13 Nov 28, 2000   - Stop flooding console with auto-neg issues
+ *			       when link can't be established.
  *			     - Added the bbuf option as a kernel parameter.
  *			     - Fixed ioaddr probe bug.
  *			     - Fixed stupid deadlock with MII interrupts.
@@ -147,28 +147,30 @@
  *			       TLAN v1.0 silicon. This needs to be investigated
  *			       further.
  *
- * 	v1.14 Dec 16, 2000   - Added support for servicing multiple frames per.
- * 			       interrupt. Thanks goes to
- * 			       Adam Keys <adam@ti.com>
- * 			       Denis Beaudoin <dbeaudoin@ti.com>
- * 			       for providing the patch.
- * 			     - Fixed auto-neg output when using multiple
- * 			       adapters.
- * 			     - Converted to use new taskq interface.
+ *	v1.14 Dec 16, 2000   - Added support for servicing multiple frames per.
+ *			       interrupt. Thanks goes to
+ *			       Adam Keys <adam@ti.com>
+ *			       Denis Beaudoin <dbeaudoin@ti.com>
+ *			       for providing the patch.
+ *			     - Fixed auto-neg output when using multiple
+ *			       adapters.
+ *			     - Converted to use new taskq interface.
  *
- * 	v1.14a Jan 6, 2001   - Minor adjustments (spinlocks, etc.)
+ *	v1.14a Jan 6, 2001   - Minor adjustments (spinlocks, etc.)
  *
  *	Samuel Chessman <chessman@tux.org> New Maintainer!
  *
  *	v1.15 Apr 4, 2002    - Correct operation when aui=1 to be
- *	                       10T half duplex no loopback
- *	                       Thanks to Gunnar Eikman
+ *			       10T half duplex no loopback
+ *			       Thanks to Gunnar Eikman
  *
  *	Sakari Ailus <sakari.ailus@iki.fi>:
  *
  *	v1.15a Dec 15 2008   - Remove bbuf support, it doesn't work anyway.
+ *	v1.16  Jan 6  2011   - Make checkpatch.pl happy.
+ *	v1.17  Jan 6  2011   - Add suspend/resume support.
  *
- *******************************************************************************/
+ ******************************************************************************/
 
 #include <linux/module.h>
 #include <linux/init.h>
@@ -185,13 +187,11 @@
 
 #include "tlan.h"
 
-typedef u32 (TLanIntVectorFunc)( struct net_device *, u16 );
-
 
 /* For removing EISA devices */
-static	struct net_device	*TLan_Eisa_Devices;
+static	struct net_device	*tlan_eisa_devices;
 
-static	int		TLanDevicesInstalled;
+static	int		tlan_devices_installed;
 
 /* Set speed, duplex and aui settings */
 static  int aui[MAX_TLAN_BOARDS];
@@ -202,7 +202,8 @@
 module_param_array(duplex, int, NULL, 0);
 module_param_array(speed, int, NULL, 0);
 MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
-MODULE_PARM_DESC(duplex, "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
+MODULE_PARM_DESC(duplex,
+		 "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
 MODULE_PARM_DESC(speed, "ThunderLAN port speen setting(s) (0,10,100)");
 
 MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
@@ -218,139 +219,144 @@
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
 
-static	const char TLanSignature[] = "TLAN";
-static  const char tlan_banner[] = "ThunderLAN driver v1.15a\n";
+static	const char tlan_signature[] = "TLAN";
+static  const char tlan_banner[] = "ThunderLAN driver v1.17\n";
 static  int tlan_have_pci;
 static  int tlan_have_eisa;
 
-static const char *media[] = {
-	"10BaseT-HD ", "10BaseT-FD ","100baseTx-HD ",
-	"100baseTx-FD", "100baseT4", NULL
+static const char * const media[] = {
+	"10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
+	"100BaseTx-FD", "100BaseT4", NULL
 };
 
 static struct board {
-	const char	*deviceLabel;
-	u32	   	flags;
-	u16	   	addrOfs;
+	const char	*device_label;
+	u32		flags;
+	u16		addr_ofs;
 } board_info[] = {
 	{ "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
-	{ "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+	{ "Compaq Netelligent 10/100 TX PCI UTP",
+	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
 	{ "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
 	{ "Compaq NetFlex-3/P",
 	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
 	{ "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
 	{ "Compaq Netelligent Integrated 10/100 TX UTP",
 	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
-	{ "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 },
-	{ "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 },
+	{ "Compaq Netelligent Dual 10/100 TX PCI UTP",
+	  TLAN_ADAPTER_NONE, 0x83 },
+	{ "Compaq Netelligent 10/100 TX Embedded UTP",
+	  TLAN_ADAPTER_NONE, 0x83 },
 	{ "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
-	{ "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xF8 },
-	{ "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 },
+	{ "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
+	{ "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
 	{ "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
-	{ "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 },
+	{ "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
 	{ "Compaq NetFlex-3/E",
-	  TLAN_ADAPTER_ACTIVITY_LED | 	/* EISA card */
+	  TLAN_ADAPTER_ACTIVITY_LED |	/* EISA card */
 	  TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
-	{ "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
+	{ "Compaq NetFlex-3/E",
+	  TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
 };
 
 static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
 	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
 	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
 	{ PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
 	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
-		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
 	{ 0,}
 };
 MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
 
-static void	TLan_EisaProbe( void );
-static void	TLan_Eisa_Cleanup( void );
-static int      TLan_Init( struct net_device * );
-static int	TLan_Open( struct net_device *dev );
-static netdev_tx_t TLan_StartTx( struct sk_buff *, struct net_device *);
-static irqreturn_t TLan_HandleInterrupt( int, void *);
-static int	TLan_Close( struct net_device *);
-static struct	net_device_stats *TLan_GetStats( struct net_device *);
-static void	TLan_SetMulticastList( struct net_device *);
-static int	TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd);
-static int      TLan_probe1( struct pci_dev *pdev, long ioaddr,
-			     int irq, int rev, const struct pci_device_id *ent);
-static void	TLan_tx_timeout( struct net_device *dev);
-static void	TLan_tx_timeout_work(struct work_struct *work);
-static int 	tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent);
+static void	tlan_eisa_probe(void);
+static void	tlan_eisa_cleanup(void);
+static int      tlan_init(struct net_device *);
+static int	tlan_open(struct net_device *dev);
+static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
+static irqreturn_t tlan_handle_interrupt(int, void *);
+static int	tlan_close(struct net_device *);
+static struct	net_device_stats *tlan_get_stats(struct net_device *);
+static void	tlan_set_multicast_list(struct net_device *);
+static int	tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int      tlan_probe1(struct pci_dev *pdev, long ioaddr,
+			    int irq, int rev, const struct pci_device_id *ent);
+static void	tlan_tx_timeout(struct net_device *dev);
+static void	tlan_tx_timeout_work(struct work_struct *work);
+static int	tlan_init_one(struct pci_dev *pdev,
+			      const struct pci_device_id *ent);
 
-static u32	TLan_HandleTxEOF( struct net_device *, u16 );
-static u32	TLan_HandleStatOverflow( struct net_device *, u16 );
-static u32	TLan_HandleRxEOF( struct net_device *, u16 );
-static u32	TLan_HandleDummy( struct net_device *, u16 );
-static u32	TLan_HandleTxEOC( struct net_device *, u16 );
-static u32	TLan_HandleStatusCheck( struct net_device *, u16 );
-static u32	TLan_HandleRxEOC( struct net_device *, u16 );
+static u32	tlan_handle_tx_eof(struct net_device *, u16);
+static u32	tlan_handle_stat_overflow(struct net_device *, u16);
+static u32	tlan_handle_rx_eof(struct net_device *, u16);
+static u32	tlan_handle_dummy(struct net_device *, u16);
+static u32	tlan_handle_tx_eoc(struct net_device *, u16);
+static u32	tlan_handle_status_check(struct net_device *, u16);
+static u32	tlan_handle_rx_eoc(struct net_device *, u16);
 
-static void	TLan_Timer( unsigned long );
+static void	tlan_timer(unsigned long);
 
-static void	TLan_ResetLists( struct net_device * );
-static void	TLan_FreeLists( struct net_device * );
-static void	TLan_PrintDio( u16 );
-static void	TLan_PrintList( TLanList *, char *, int );
-static void	TLan_ReadAndClearStats( struct net_device *, int );
-static void	TLan_ResetAdapter( struct net_device * );
-static void	TLan_FinishReset( struct net_device * );
-static void	TLan_SetMac( struct net_device *, int areg, char *mac );
+static void	tlan_reset_lists(struct net_device *);
+static void	tlan_free_lists(struct net_device *);
+static void	tlan_print_dio(u16);
+static void	tlan_print_list(struct tlan_list *, char *, int);
+static void	tlan_read_and_clear_stats(struct net_device *, int);
+static void	tlan_reset_adapter(struct net_device *);
+static void	tlan_finish_reset(struct net_device *);
+static void	tlan_set_mac(struct net_device *, int areg, char *mac);
 
-static void	TLan_PhyPrint( struct net_device * );
-static void	TLan_PhyDetect( struct net_device * );
-static void	TLan_PhyPowerDown( struct net_device * );
-static void	TLan_PhyPowerUp( struct net_device * );
-static void	TLan_PhyReset( struct net_device * );
-static void	TLan_PhyStartLink( struct net_device * );
-static void	TLan_PhyFinishAutoNeg( struct net_device * );
+static void	tlan_phy_print(struct net_device *);
+static void	tlan_phy_detect(struct net_device *);
+static void	tlan_phy_power_down(struct net_device *);
+static void	tlan_phy_power_up(struct net_device *);
+static void	tlan_phy_reset(struct net_device *);
+static void	tlan_phy_start_link(struct net_device *);
+static void	tlan_phy_finish_auto_neg(struct net_device *);
 #ifdef MONITOR
-static void     TLan_PhyMonitor( struct net_device * );
+static void     tlan_phy_monitor(struct net_device *);
 #endif
 
 /*
-static int	TLan_PhyNop( struct net_device * );
-static int	TLan_PhyInternalCheck( struct net_device * );
-static int	TLan_PhyInternalService( struct net_device * );
-static int	TLan_PhyDp83840aCheck( struct net_device * );
+  static int	tlan_phy_nop(struct net_device *);
+  static int	tlan_phy_internal_check(struct net_device *);
+  static int	tlan_phy_internal_service(struct net_device *);
+  static int	tlan_phy_dp83840a_check(struct net_device *);
 */
 
-static bool	TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
-static void	TLan_MiiSendData( u16, u32, unsigned );
-static void	TLan_MiiSync( u16 );
-static void	TLan_MiiWriteReg( struct net_device *, u16, u16, u16 );
+static bool	tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
+static void	tlan_mii_send_data(u16, u32, unsigned);
+static void	tlan_mii_sync(u16);
+static void	tlan_mii_write_reg(struct net_device *, u16, u16, u16);
 
-static void	TLan_EeSendStart( u16 );
-static int	TLan_EeSendByte( u16, u8, int );
-static void	TLan_EeReceiveByte( u16, u8 *, int );
-static int	TLan_EeReadByte( struct net_device *, u8, u8 * );
+static void	tlan_ee_send_start(u16);
+static int	tlan_ee_send_byte(u16, u8, int);
+static void	tlan_ee_receive_byte(u16, u8 *, int);
+static int	tlan_ee_read_byte(struct net_device *, u8, u8 *);
 
 
 static inline void
-TLan_StoreSKB( struct tlan_list_tag *tag, struct sk_buff *skb)
+tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
 {
 	unsigned long addr = (unsigned long)skb;
 	tag->buffer[9].address = addr;
@@ -358,7 +364,7 @@
 }
 
 static inline struct sk_buff *
-TLan_GetSKB( const struct tlan_list_tag *tag)
+tlan_get_skb(const struct tlan_list *tag)
 {
 	unsigned long addr;
 
@@ -367,50 +373,50 @@
 	return (struct sk_buff *) addr;
 }
 
-
-static TLanIntVectorFunc *TLanIntVector[TLAN_INT_NUMBER_OF_INTS] = {
+static u32
+(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
 	NULL,
-	TLan_HandleTxEOF,
-	TLan_HandleStatOverflow,
-	TLan_HandleRxEOF,
-	TLan_HandleDummy,
-	TLan_HandleTxEOC,
-	TLan_HandleStatusCheck,
-	TLan_HandleRxEOC
+	tlan_handle_tx_eof,
+	tlan_handle_stat_overflow,
+	tlan_handle_rx_eof,
+	tlan_handle_dummy,
+	tlan_handle_tx_eoc,
+	tlan_handle_status_check,
+	tlan_handle_rx_eoc
 };
 
 static inline void
-TLan_SetTimer( struct net_device *dev, u32 ticks, u32 type )
+tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
 {
-	TLanPrivateInfo *priv = netdev_priv(dev);
+	struct tlan_priv *priv = netdev_priv(dev);
 	unsigned long flags = 0;
 
 	if (!in_irq())
 		spin_lock_irqsave(&priv->lock, flags);
-	if ( priv->timer.function != NULL &&
-		priv->timerType != TLAN_TIMER_ACTIVITY ) {
+	if (priv->timer.function != NULL &&
+	    priv->timer_type != TLAN_TIMER_ACTIVITY) {
 		if (!in_irq())
 			spin_unlock_irqrestore(&priv->lock, flags);
 		return;
 	}
-	priv->timer.function = TLan_Timer;
+	priv->timer.function = tlan_timer;
 	if (!in_irq())
 		spin_unlock_irqrestore(&priv->lock, flags);
 
 	priv->timer.data = (unsigned long) dev;
-	priv->timerSetAt = jiffies;
-	priv->timerType = type;
+	priv->timer_set_at = jiffies;
+	priv->timer_type = type;
 	mod_timer(&priv->timer, jiffies + ticks);
 
-} /* TLan_SetTimer */
+}
 
 
 /*****************************************************************************
 ******************************************************************************
 
-	ThunderLAN Driver Primary Functions
+ThunderLAN driver primary functions
 
-	These functions are more or less common to all Linux network drivers.
+These functions are more or less common to all Linux network drivers.
 
 ******************************************************************************
 *****************************************************************************/
@@ -419,49 +425,117 @@
 
 
 
-	/***************************************************************
-	 *	tlan_remove_one
-	 *
-	 *	Returns:
-	 *		Nothing
-	 *	Parms:
-	 *		None
-	 *
-	 *	Goes through the TLanDevices list and frees the device
-	 *	structs and memory associated with each device (lists
-	 *	and buffers).  It also ureserves the IO port regions
-	 *	associated with this device.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_remove_one
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		None
+ *
+ *	Goes through the TLanDevices list and frees the device
+ *	structs and memory associated with each device (lists
+ *	and buffers).  It also unreserves the IO port regions
+ *	associated with this device.
+ *
+ **************************************************************/
 
 
-static void __devexit tlan_remove_one( struct pci_dev *pdev)
+static void __devexit tlan_remove_one(struct pci_dev *pdev)
 {
-	struct net_device *dev = pci_get_drvdata( pdev );
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 
-	unregister_netdev( dev );
+	unregister_netdev(dev);
 
-	if ( priv->dmaStorage ) {
-		pci_free_consistent(priv->pciDev,
-				    priv->dmaSize, priv->dmaStorage,
-				    priv->dmaStorageDMA );
+	if (priv->dma_storage) {
+		pci_free_consistent(priv->pci_dev,
+				    priv->dma_size, priv->dma_storage,
+				    priv->dma_storage_dma);
 	}
 
 #ifdef CONFIG_PCI
 	pci_release_regions(pdev);
 #endif
 
-	free_netdev( dev );
+	free_netdev(dev);
 
-	pci_set_drvdata( pdev, NULL );
+	pci_set_drvdata(pdev, NULL);
 }
 
+static void tlan_start(struct net_device *dev)
+{
+	tlan_reset_lists(dev);
+	/* NOTE: It might not be necessary to read the stats before a
+	   reset if you don't care what the values are.
+	*/
+	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+	tlan_reset_adapter(dev);
+	netif_wake_queue(dev);
+}
+
+static void tlan_stop(struct net_device *dev)
+{
+	struct tlan_priv *priv = netdev_priv(dev);
+
+	tlan_read_and_clear_stats(dev, TLAN_RECORD);
+	outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
+	/* Reset and power down phy */
+	tlan_reset_adapter(dev);
+	if (priv->timer.function != NULL) {
+		del_timer_sync(&priv->timer);
+		priv->timer.function = NULL;
+	}
+}
+
+#ifdef CONFIG_PM
+
+static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	if (netif_running(dev))
+		tlan_stop(dev);
+
+	netif_device_detach(dev);
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_wake_from_d3(pdev, false);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+static int tlan_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	pci_enable_wake(pdev, 0, 0);
+	netif_device_attach(dev);
+
+	if (netif_running(dev))
+		tlan_start(dev);
+
+	return 0;
+}
+
+#else /* CONFIG_PM */
+
+#define tlan_suspend   NULL
+#define tlan_resume    NULL
+
+#endif /* CONFIG_PM */
+
+
 static struct pci_driver tlan_driver = {
 	.name		= "tlan",
 	.id_table	= tlan_pci_tbl,
 	.probe		= tlan_init_one,
 	.remove		= __devexit_p(tlan_remove_one),
+	.suspend	= tlan_suspend,
+	.resume		= tlan_resume,
 };
 
 static int __init tlan_probe(void)
@@ -482,13 +556,13 @@
 	}
 
 	TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
-	TLan_EisaProbe();
+	tlan_eisa_probe();
 
 	printk(KERN_INFO "TLAN: %d device%s installed, PCI: %d  EISA: %d\n",
-		 TLanDevicesInstalled, TLanDevicesInstalled == 1 ? "" : "s",
-		 tlan_have_pci, tlan_have_eisa);
+	       tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
+	       tlan_have_pci, tlan_have_eisa);
 
-	if (TLanDevicesInstalled == 0) {
+	if (tlan_devices_installed == 0) {
 		rc = -ENODEV;
 		goto  err_out_pci_unreg;
 	}
@@ -501,39 +575,39 @@
 }
 
 
-static int __devinit tlan_init_one( struct pci_dev *pdev,
-				    const struct pci_device_id *ent)
+static int __devinit tlan_init_one(struct pci_dev *pdev,
+				   const struct pci_device_id *ent)
 {
-	return TLan_probe1( pdev, -1, -1, 0, ent);
+	return tlan_probe1(pdev, -1, -1, 0, ent);
 }
 
 
 /*
-	***************************************************************
-	 *	tlan_probe1
-	 *
-	 *	Returns:
-	 *		0 on success, error code on error
-	 *	Parms:
-	 *		none
-	 *
-	 *	The name is lower case to fit in with all the rest of
-	 *	the netcard_probe names.  This function looks for
-	 *	another TLan based adapter, setting it up with the
-	 *	allocated device struct if one is found.
-	 *	tlan_probe has been ported to the new net API and
-	 *	now allocates its own device structure. This function
-	 *	is also used by modules.
-	 *
-	 **************************************************************/
+***************************************************************
+*	tlan_probe1
+*
+*	Returns:
+*		0 on success, error code on error
+*	Parms:
+*		none
+*
+*	The name is lower case to fit in with all the rest of
+*	the netcard_probe names.  This function looks for
+*	another TLan based adapter, setting it up with the
+*	allocated device struct if one is found.
+*	tlan_probe has been ported to the new net API and
+*	now allocates its own device structure. This function
+*	is also used by modules.
+*
+**************************************************************/
 
-static int __devinit TLan_probe1(struct pci_dev *pdev,
+static int __devinit tlan_probe1(struct pci_dev *pdev,
 				 long ioaddr, int irq, int rev,
-				 const struct pci_device_id *ent )
+				 const struct pci_device_id *ent)
 {
 
 	struct net_device  *dev;
-	TLanPrivateInfo    *priv;
+	struct tlan_priv  *priv;
 	u16		   device_id;
 	int		   reg, rc = -ENODEV;
 
@@ -543,7 +617,7 @@
 		if (rc)
 			return rc;
 
-		rc = pci_request_regions(pdev, TLanSignature);
+		rc = pci_request_regions(pdev, tlan_signature);
 		if (rc) {
 			printk(KERN_ERR "TLAN: Could not reserve IO regions\n");
 			goto err_out;
@@ -551,7 +625,7 @@
 	}
 #endif  /*  CONFIG_PCI  */
 
-	dev = alloc_etherdev(sizeof(TLanPrivateInfo));
+	dev = alloc_etherdev(sizeof(struct tlan_priv));
 	if (dev == NULL) {
 		printk(KERN_ERR "TLAN: Could not allocate memory for device.\n");
 		rc = -ENOMEM;
@@ -561,26 +635,28 @@
 
 	priv = netdev_priv(dev);
 
-	priv->pciDev = pdev;
+	priv->pci_dev = pdev;
 	priv->dev = dev;
 
 	/* Is this a PCI device? */
 	if (pdev) {
-		u32 		   pci_io_base = 0;
+		u32		   pci_io_base = 0;
 
 		priv->adapter = &board_info[ent->driver_data];
 
 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (rc) {
-			printk(KERN_ERR "TLAN: No suitable PCI mapping available.\n");
+			printk(KERN_ERR
+			       "TLAN: No suitable PCI mapping available.\n");
 			goto err_out_free_dev;
 		}
 
-		for ( reg= 0; reg <= 5; reg ++ ) {
+		for (reg = 0; reg <= 5; reg++) {
 			if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
 				pci_io_base = pci_resource_start(pdev, reg);
-				TLAN_DBG( TLAN_DEBUG_GNRL, "IO mapping is available at %x.\n",
-						pci_io_base);
+				TLAN_DBG(TLAN_DEBUG_GNRL,
+					 "IO mapping is available at %x.\n",
+					 pci_io_base);
 				break;
 			}
 		}
@@ -592,7 +668,7 @@
 
 		dev->base_addr = pci_io_base;
 		dev->irq = pdev->irq;
-		priv->adapterRev = pdev->revision;
+		priv->adapter_rev = pdev->revision;
 		pci_set_master(pdev);
 		pci_set_drvdata(pdev, dev);
 
@@ -602,11 +678,11 @@
 		device_id = inw(ioaddr + EISA_ID2);
 		priv->is_eisa = 1;
 		if (device_id == 0x20F1) {
-			priv->adapter = &board_info[13]; 	/* NetFlex-3/E */
-			priv->adapterRev = 23;			/* TLAN 2.3 */
+			priv->adapter = &board_info[13]; /* NetFlex-3/E */
+			priv->adapter_rev = 23;		/* TLAN 2.3 */
 		} else {
 			priv->adapter = &board_info[14];
-			priv->adapterRev = 10;			/* TLAN 1.0 */
+			priv->adapter_rev = 10;		/* TLAN 1.0 */
 		}
 		dev->base_addr = ioaddr;
 		dev->irq = irq;
@@ -620,11 +696,11 @@
 		priv->speed  = ((dev->mem_start & 0x18) == 0x18) ? 0
 			: (dev->mem_start & 0x18) >> 3;
 
-		if (priv->speed == 0x1) {
+		if (priv->speed == 0x1)
 			priv->speed = TLAN_SPEED_10;
-		} else if (priv->speed == 0x2) {
+		else if (priv->speed == 0x2)
 			priv->speed = TLAN_SPEED_100;
-		}
+
 		debug = priv->debug = dev->mem_end;
 	} else {
 		priv->aui    = aui[boards_found];
@@ -635,11 +711,11 @@
 
 	/* This will be used when we get an adapter error from
 	 * within our irq handler */
-	INIT_WORK(&priv->tlan_tqueue, TLan_tx_timeout_work);
+	INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);
 
 	spin_lock_init(&priv->lock);
 
-	rc = TLan_Init(dev);
+	rc = tlan_init(dev);
 	if (rc) {
 		printk(KERN_ERR "TLAN: Could not set up device.\n");
 		goto err_out_free_dev;
@@ -652,29 +728,29 @@
 	}
 
 
-	TLanDevicesInstalled++;
+	tlan_devices_installed++;
 	boards_found++;
 
 	/* pdev is NULL if this is an EISA device */
 	if (pdev)
 		tlan_have_pci++;
 	else {
-		priv->nextDevice = TLan_Eisa_Devices;
-		TLan_Eisa_Devices = dev;
+		priv->next_device = tlan_eisa_devices;
+		tlan_eisa_devices = dev;
 		tlan_have_eisa++;
 	}
 
 	printk(KERN_INFO "TLAN: %s irq=%2d, io=%04x, %s, Rev. %d\n",
-			dev->name,
-			(int) dev->irq,
-			(int) dev->base_addr,
-			priv->adapter->deviceLabel,
-			priv->adapterRev);
+	       dev->name,
+	       (int) dev->irq,
+	       (int) dev->base_addr,
+	       priv->adapter->device_label,
+	       priv->adapter_rev);
 	return 0;
 
 err_out_uninit:
-	pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage,
-			    priv->dmaStorageDMA );
+	pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
+			    priv->dma_storage_dma);
 err_out_free_dev:
 	free_netdev(dev);
 err_out_regions:
@@ -689,22 +765,23 @@
 }
 
 
-static void TLan_Eisa_Cleanup(void)
+static void tlan_eisa_cleanup(void)
 {
 	struct net_device *dev;
-	TLanPrivateInfo *priv;
+	struct tlan_priv *priv;
 
-	while( tlan_have_eisa ) {
-		dev = TLan_Eisa_Devices;
+	while (tlan_have_eisa) {
+		dev = tlan_eisa_devices;
 		priv = netdev_priv(dev);
-		if (priv->dmaStorage) {
-			pci_free_consistent(priv->pciDev, priv->dmaSize,
-					    priv->dmaStorage, priv->dmaStorageDMA );
+		if (priv->dma_storage) {
+			pci_free_consistent(priv->pci_dev, priv->dma_size,
+					    priv->dma_storage,
+					    priv->dma_storage_dma);
 		}
-		release_region( dev->base_addr, 0x10);
-		unregister_netdev( dev );
-		TLan_Eisa_Devices = priv->nextDevice;
-		free_netdev( dev );
+		release_region(dev->base_addr, 0x10);
+		unregister_netdev(dev);
+		tlan_eisa_devices = priv->next_device;
+		free_netdev(dev);
 		tlan_have_eisa--;
 	}
 }
@@ -715,7 +792,7 @@
 	pci_unregister_driver(&tlan_driver);
 
 	if (tlan_have_eisa)
-		TLan_Eisa_Cleanup();
+		tlan_eisa_cleanup();
 
 }
 
@@ -726,24 +803,24 @@
 
 
 
-	/**************************************************************
-	 * 	TLan_EisaProbe
-	 *
-	 *  	Returns: 0 on success, 1 otherwise
-	 *
-	 *  	Parms:	 None
-	 *
-	 *
-	 *  	This functions probes for EISA devices and calls
-	 *  	TLan_probe1 when one is found.
-	 *
-	 *************************************************************/
+/**************************************************************
+ *	tlan_eisa_probe
+ *
+ *	Returns: 0 on success, 1 otherwise
+ *
+ *	Parms:	 None
+ *
+ *
+ *	This function probes for EISA devices and calls
+ *	tlan_probe1 when one is found.
+ *
+ *************************************************************/
 
-static void  __init TLan_EisaProbe (void)
+static void  __init tlan_eisa_probe(void)
 {
-	long 	ioaddr;
-	int 	rc = -ENODEV;
-	int 	irq;
+	long	ioaddr;
+	int	rc = -ENODEV;
+	int	irq;
 	u16	device_id;
 
 	if (!EISA_bus) {
@@ -754,15 +831,16 @@
 	/* Loop through all slots of the EISA bus */
 	for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
 
-	TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
-		 (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID));
-	TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n",
-		 (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2));
+		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+			 (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
+		TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+			 (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));
 
 
-		TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ",
-				   	(int) ioaddr);
-		if (request_region(ioaddr, 0x10, TLanSignature) == NULL)
+		TLAN_DBG(TLAN_DEBUG_PROBE,
+			 "Probing for EISA adapter at IO: 0x%4x : ",
+			 (int) ioaddr);
+		if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
 			goto out;
 
 		if (inw(ioaddr + EISA_ID) != 0x110E) {
@@ -772,326 +850,326 @@
 
 		device_id = inw(ioaddr + EISA_ID2);
 		if (device_id !=  0x20F1 && device_id != 0x40F1) {
-			release_region (ioaddr, 0x10);
+			release_region(ioaddr, 0x10);
 			goto out;
 		}
 
-	 	if (inb(ioaddr + EISA_CR) != 0x1) { 	/* Check if adapter is enabled */
-			release_region (ioaddr, 0x10);
+		/* check if adapter is enabled */
+		if (inb(ioaddr + EISA_CR) != 0x1) {
+			release_region(ioaddr, 0x10);
 			goto out2;
 		}
 
 		if (debug == 0x10)
-			printk("Found one\n");
+			printk(KERN_INFO "Found one\n");
 
 
 		/* Get irq from board */
-		switch (inb(ioaddr + 0xCC0)) {
-			case(0x10):
-				irq=5;
-				break;
-			case(0x20):
-				irq=9;
-				break;
-			case(0x40):
-				irq=10;
-				break;
-			case(0x80):
-				irq=11;
-				break;
-			default:
-				goto out;
+		switch (inb(ioaddr + 0xcc0)) {
+		case(0x10):
+			irq = 5;
+			break;
+		case(0x20):
+			irq = 9;
+			break;
+		case(0x40):
+			irq = 10;
+			break;
+		case(0x80):
+			irq = 11;
+			break;
+		default:
+			goto out;
 		}
 
 
 		/* Setup the newly found eisa adapter */
-		rc = TLan_probe1( NULL, ioaddr, irq,
-					12, NULL);
+		rc = tlan_probe1(NULL, ioaddr, irq,
+				 12, NULL);
 		continue;
 
-		out:
-			if (debug == 0x10)
-				printk("None found\n");
-			continue;
+out:
+		if (debug == 0x10)
+			printk(KERN_INFO "None found\n");
+		continue;
 
-		out2:	if (debug == 0x10)
-				printk("Card found but it is not enabled, skipping\n");
-			continue;
+out2:
+		if (debug == 0x10)
+			printk(KERN_INFO "Card found but it is not enabled, skipping\n");
+		continue;
 
 	}
 
-} /* TLan_EisaProbe */
+}
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static void TLan_Poll(struct net_device *dev)
+static void tlan_poll(struct net_device *dev)
 {
 	disable_irq(dev->irq);
-	TLan_HandleInterrupt(dev->irq, dev);
+	tlan_handle_interrupt(dev->irq, dev);
 	enable_irq(dev->irq);
 }
 #endif
 
-static const struct net_device_ops TLan_netdev_ops = {
-	.ndo_open 		= TLan_Open,
-	.ndo_stop		= TLan_Close,
-	.ndo_start_xmit		= TLan_StartTx,
-	.ndo_tx_timeout		= TLan_tx_timeout,
-	.ndo_get_stats		= TLan_GetStats,
-	.ndo_set_multicast_list = TLan_SetMulticastList,
-	.ndo_do_ioctl		= TLan_ioctl,
+static const struct net_device_ops tlan_netdev_ops = {
+	.ndo_open		= tlan_open,
+	.ndo_stop		= tlan_close,
+	.ndo_start_xmit		= tlan_start_tx,
+	.ndo_tx_timeout		= tlan_tx_timeout,
+	.ndo_get_stats		= tlan_get_stats,
+	.ndo_set_multicast_list = tlan_set_multicast_list,
+	.ndo_do_ioctl		= tlan_ioctl,
 	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	 = TLan_Poll,
+	.ndo_poll_controller	 = tlan_poll,
 #endif
 };
 
 
 
-	/***************************************************************
-	 *	TLan_Init
-	 *
-	 *	Returns:
-	 *		0 on success, error code otherwise.
-	 *	Parms:
-	 *		dev	The structure of the device to be
-	 *			init'ed.
-	 *
-	 *	This function completes the initialization of the
-	 *	device structure and driver.  It reserves the IO
-	 *	addresses, allocates memory for the lists and bounce
-	 *	buffers, retrieves the MAC address from the eeprom
-	 *	and assignes the device's methods.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_init
+ *
+ *	Returns:
+ *		0 on success, error code otherwise.
+ *	Parms:
+ *		dev	The structure of the device to be
+ *			init'ed.
+ *
+ *	This function completes the initialization of the
+ *	device structure and driver.  It reserves the IO
+ *	addresses, allocates memory for the lists and bounce
+ *	buffers, retrieves the MAC address from the eeprom
+ *	and assigns the device's methods.
+ *
+ **************************************************************/
 
-static int TLan_Init( struct net_device *dev )
+static int tlan_init(struct net_device *dev)
 {
 	int		dma_size;
-	int 		err;
+	int		err;
 	int		i;
-	TLanPrivateInfo	*priv;
+	struct tlan_priv	*priv;
 
 	priv = netdev_priv(dev);
 
-	dma_size = ( TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS )
-		* ( sizeof(TLanList) );
-	priv->dmaStorage = pci_alloc_consistent(priv->pciDev,
-						dma_size, &priv->dmaStorageDMA);
-	priv->dmaSize = dma_size;
+	dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
+		* (sizeof(struct tlan_list));
+	priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
+						 dma_size,
+						 &priv->dma_storage_dma);
+	priv->dma_size = dma_size;
 
-	if ( priv->dmaStorage == NULL ) {
-		printk(KERN_ERR "TLAN:  Could not allocate lists and buffers for %s.\n",
-			dev->name );
+	if (priv->dma_storage == NULL) {
+		printk(KERN_ERR
+		       "TLAN:  Could not allocate lists and buffers for %s.\n",
+		       dev->name);
 		return -ENOMEM;
 	}
-	memset( priv->dmaStorage, 0, dma_size );
-	priv->rxList = (TLanList *) ALIGN((unsigned long)priv->dmaStorage, 8);
-	priv->rxListDMA = ALIGN(priv->dmaStorageDMA, 8);
-	priv->txList = priv->rxList + TLAN_NUM_RX_LISTS;
-	priv->txListDMA = priv->rxListDMA + sizeof(TLanList) * TLAN_NUM_RX_LISTS;
+	memset(priv->dma_storage, 0, dma_size);
+	priv->rx_list = (struct tlan_list *)
+		ALIGN((unsigned long)priv->dma_storage, 8);
+	priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
+	priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
+	priv->tx_list_dma =
+		priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
 
 	err = 0;
-	for ( i = 0;  i < 6 ; i++ )
-		err |= TLan_EeReadByte( dev,
-					(u8) priv->adapter->addrOfs + i,
-					(u8 *) &dev->dev_addr[i] );
-	if ( err ) {
+	for (i = 0;  i < 6 ; i++)
+		err |= tlan_ee_read_byte(dev,
+					 (u8) priv->adapter->addr_ofs + i,
+					 (u8 *) &dev->dev_addr[i]);
+	if (err) {
 		printk(KERN_ERR "TLAN: %s: Error reading MAC from eeprom: %d\n",
-			dev->name,
-			err );
+		       dev->name,
+		       err);
 	}
 	dev->addr_len = 6;
 
 	netif_carrier_off(dev);
 
 	/* Device methods */
-	dev->netdev_ops = &TLan_netdev_ops;
+	dev->netdev_ops = &tlan_netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
 	return 0;
 
-} /* TLan_Init */
+}
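For orientation, a sketch of how the single coherent allocation set up in tlan_init() is carved up (a summary of the code above with symbolic sizes; not part of the patch itself):

	/*
	 * dma_storage (one pci_alloc_consistent() block of dma_size bytes)
	 *   |-- padding up to the next 8-byte boundary
	 *   |-- rx_list[0] .. rx_list[TLAN_NUM_RX_LISTS - 1]
	 *   `-- tx_list[0] .. tx_list[TLAN_NUM_TX_LISTS - 1]
	 *
	 * rx_list     = ALIGN(dma_storage, 8)
	 * tx_list     = rx_list + TLAN_NUM_RX_LISTS
	 * tx_list_dma = rx_list_dma + sizeof(struct tlan_list) * TLAN_NUM_RX_LISTS
	 */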
 
 
 
 
-	/***************************************************************
-	 *	TLan_Open
-	 *
-	 *	Returns:
-	 *		0 on success, error code otherwise.
-	 *	Parms:
-	 *		dev	Structure of device to be opened.
-	 *
-	 *	This routine puts the driver and TLAN adapter in a
-	 *	state where it is ready to send and receive packets.
-	 *	It allocates the IRQ, resets and brings the adapter
-	 *	out of reset, and allows interrupts.  It also delays
-	 *	the startup for autonegotiation or sends a Rx GO
-	 *	command to the adapter, as appropriate.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_open
+ *
+ *	Returns:
+ *		0 on success, error code otherwise.
+ *	Parms:
+ *		dev	Structure of device to be opened.
+ *
+ *	This routine puts the driver and TLAN adapter in a
+ *	state where it is ready to send and receive packets.
+ *	It allocates the IRQ, resets and brings the adapter
+ *	out of reset, and allows interrupts.  It also delays
+ *	the startup for autonegotiation or sends a Rx GO
+ *	command to the adapter, as appropriate.
+ *
+ **************************************************************/
 
-static int TLan_Open( struct net_device *dev )
+static int tlan_open(struct net_device *dev)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	int		err;
 
-	priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION );
-	err = request_irq( dev->irq, TLan_HandleInterrupt, IRQF_SHARED,
-			   dev->name, dev );
+	priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
+	err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
+			  dev->name, dev);
 
-	if ( err ) {
+	if (err) {
 		pr_err("TLAN:  Cannot open %s because IRQ %d is already in use.\n",
-		       dev->name, dev->irq );
+		       dev->name, dev->irq);
 		return err;
 	}
 
 	init_timer(&priv->timer);
-	netif_start_queue(dev);
 
-	/* NOTE: It might not be necessary to read the stats before a
-			 reset if you don't care what the values are.
-	*/
-	TLan_ResetLists( dev );
-	TLan_ReadAndClearStats( dev, TLAN_IGNORE );
-	TLan_ResetAdapter( dev );
+	tlan_start(dev);
 
-	TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened.  TLAN Chip Rev: %x\n",
-		  dev->name, priv->tlanRev );
+	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened.  TLAN Chip Rev: %x\n",
+		 dev->name, priv->tlan_rev);
 
 	return 0;
 
-} /* TLan_Open */
+}
 
 
 
-	/**************************************************************
-	 *	TLan_ioctl
-	 *
-	 *	Returns:
-	 *		0 on success, error code otherwise
-	 *	Params:
-	 *		dev	structure of device to receive ioctl.
-	 *
-	 *		rq	ifreq structure to hold userspace data.
-	 *
-	 *		cmd	ioctl command.
-	 *
-	 *
-	 *************************************************************/
+/**************************************************************
+ *	tlan_ioctl
+ *
+ *	Returns:
+ *		0 on success, error code otherwise
+ *	Params:
+ *		dev	structure of device to receive ioctl.
+ *
+ *		rq	ifreq structure to hold userspace data.
+ *
+ *		cmd	ioctl command.
+ *
+ *
+ *************************************************************/
 
-static int TLan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	TLanPrivateInfo *priv = netdev_priv(dev);
+	struct tlan_priv *priv = netdev_priv(dev);
 	struct mii_ioctl_data *data = if_mii(rq);
-	u32 phy   = priv->phy[priv->phyNum];
+	u32 phy   = priv->phy[priv->phy_num];
 
-	if (!priv->phyOnline)
+	if (!priv->phy_online)
 		return -EAGAIN;
 
-	switch(cmd) {
-	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
-			data->phy_id = phy;
+	switch (cmd) {
+	case SIOCGMIIPHY:		/* get address of MII PHY in use. */
+		data->phy_id = phy;
 
 
-	case SIOCGMIIREG:		/* Read MII PHY register. */
-			TLan_MiiReadReg(dev, data->phy_id & 0x1f,
-					data->reg_num & 0x1f, &data->val_out);
-			return 0;
+	case SIOCGMIIREG:		/* read MII PHY register. */
+		tlan_mii_read_reg(dev, data->phy_id & 0x1f,
+				  data->reg_num & 0x1f, &data->val_out);
+		return 0;
 
 
-	case SIOCSMIIREG:		/* Write MII PHY register. */
-			TLan_MiiWriteReg(dev, data->phy_id & 0x1f,
-					 data->reg_num & 0x1f, data->val_in);
-			return 0;
-		default:
-			return -EOPNOTSUPP;
+	case SIOCSMIIREG:		/* write MII PHY register. */
+		tlan_mii_write_reg(dev, data->phy_id & 0x1f,
+				   data->reg_num & 0x1f, data->val_in);
+		return 0;
+	default:
+		return -EOPNOTSUPP;
 	}
-} /* tlan_ioctl */
+}
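For context, a minimal user-space sketch of how these MII ioctls are typically exercised (the helper name, interface name and register choice are illustrative assumptions, not part of this patch):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/mii.h>
	#include <linux/sockios.h>

	/* Read the PHY's basic mode status register via SIOCGMIIPHY/SIOCGMIIREG.
	 * "fd" is an open AF_INET datagram socket. */
	static int example_read_bmsr(int fd, const char *ifname, unsigned int *val)
	{
		struct ifreq ifr;
		struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
			return -1;
		mii->reg_num = MII_BMSR;
		if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)	/* value lands in mii->val_out */
			return -1;
		*val = mii->val_out;
		return 0;
	}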
 
 
-	/***************************************************************
-	 * 	TLan_tx_timeout
-	 *
-	 * 	Returns: nothing
-	 *
-	 * 	Params:
-	 * 		dev	structure of device which timed out
-	 * 			during transmit.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_tx_timeout
+ *
+ *	Returns: nothing
+ *
+ *	Params:
+ *		dev	structure of device which timed out
+ *			during transmit.
+ *
+ **************************************************************/
 
-static void TLan_tx_timeout(struct net_device *dev)
+static void tlan_tx_timeout(struct net_device *dev)
 {
 
-	TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
+	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
 
 	/* Ok so we timed out, lets see what we can do about it...*/
-	TLan_FreeLists( dev );
-	TLan_ResetLists( dev );
-	TLan_ReadAndClearStats( dev, TLAN_IGNORE );
-	TLan_ResetAdapter( dev );
+	tlan_free_lists(dev);
+	tlan_reset_lists(dev);
+	tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+	tlan_reset_adapter(dev);
 	dev->trans_start = jiffies; /* prevent tx timeout */
-	netif_wake_queue( dev );
+	netif_wake_queue(dev);
 
 }
 
 
-	/***************************************************************
-	 * 	TLan_tx_timeout_work
-	 *
-	 * 	Returns: nothing
-	 *
-	 * 	Params:
-	 * 		work	work item of device which timed out
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_tx_timeout_work
+ *
+ *	Returns: nothing
+ *
+ *	Params:
+ *		work	work item of device which timed out
+ *
+ **************************************************************/
 
-static void TLan_tx_timeout_work(struct work_struct *work)
+static void tlan_tx_timeout_work(struct work_struct *work)
 {
-	TLanPrivateInfo	*priv =
-		container_of(work, TLanPrivateInfo, tlan_tqueue);
+	struct tlan_priv	*priv =
+		container_of(work, struct tlan_priv, tlan_tqueue);
 
-	TLan_tx_timeout(priv->dev);
+	tlan_tx_timeout(priv->dev);
 }
 
 
 
-	/***************************************************************
-	 *	TLan_StartTx
-	 *
-	 *	Returns:
-	 *		0 on success, non-zero on failure.
-	 *	Parms:
-	 *		skb	A pointer to the sk_buff containing the
-	 *			frame to be sent.
-	 *		dev	The device to send the data on.
-	 *
-	 *	This function adds a frame to the Tx list to be sent
-	 *	ASAP.  First it	verifies that the adapter is ready and
-	 *	there is room in the queue.  Then it sets up the next
-	 *	available list, copies the frame to the	corresponding
-	 *	buffer.  If the adapter Tx channel is idle, it gives
-	 *	the adapter a Tx Go command on the list, otherwise it
-	 *	sets the forward address of the previous list to point
-	 *	to this one.  Then it frees the sk_buff.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_start_tx
+ *
+ *	Returns:
+ *		0 on success, non-zero on failure.
+ *	Parms:
+ *		skb	A pointer to the sk_buff containing the
+ *			frame to be sent.
+ *		dev	The device to send the data on.
+ *
+ *	This function adds a frame to the Tx list to be sent
+ *	ASAP.  First it verifies that the adapter is ready and
+ *	there is room in the queue.  Then it sets up the next
+ *	available list, copies the frame to the corresponding
+ *	buffer.  If the adapter Tx channel is idle, it gives
+ *	the adapter a Tx Go command on the list, otherwise it
+ *	sets the forward address of the previous list to point
+ *	to this one.  Then it frees the sk_buff.
+ *
+ **************************************************************/
 
-static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
+static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
 {
-	TLanPrivateInfo *priv = netdev_priv(dev);
+	struct tlan_priv *priv = netdev_priv(dev);
 	dma_addr_t	tail_list_phys;
-	TLanList	*tail_list;
+	struct tlan_list	*tail_list;
 	unsigned long	flags;
 	unsigned int    txlen;
 
-	if ( ! priv->phyOnline ) {
-		TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  %s PHY is not ready\n",
-			  dev->name );
+	if (!priv->phy_online) {
+		TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT:  %s PHY is not ready\n",
+			 dev->name);
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
@@ -1100,218 +1178,214 @@
 		return NETDEV_TX_OK;
 	txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
 
-	tail_list = priv->txList + priv->txTail;
-	tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
+	tail_list = priv->tx_list + priv->tx_tail;
+	tail_list_phys =
+		priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;
 
-	if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) {
-		TLAN_DBG( TLAN_DEBUG_TX,
-			  "TRANSMIT:  %s is busy (Head=%d Tail=%d)\n",
-			  dev->name, priv->txHead, priv->txTail );
+	if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
+		TLAN_DBG(TLAN_DEBUG_TX,
+			 "TRANSMIT:  %s is busy (Head=%d Tail=%d)\n",
+			 dev->name, priv->tx_head, priv->tx_tail);
 		netif_stop_queue(dev);
-		priv->txBusyCount++;
+		priv->tx_busy_count++;
 		return NETDEV_TX_BUSY;
 	}
 
 	tail_list->forward = 0;
 
-	tail_list->buffer[0].address = pci_map_single(priv->pciDev,
+	tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
 						      skb->data, txlen,
 						      PCI_DMA_TODEVICE);
-	TLan_StoreSKB(tail_list, skb);
+	tlan_store_skb(tail_list, skb);
 
-	tail_list->frameSize = (u16) txlen;
+	tail_list->frame_size = (u16) txlen;
 	tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
 	tail_list->buffer[1].count = 0;
 	tail_list->buffer[1].address = 0;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	tail_list->cStat = TLAN_CSTAT_READY;
-	if ( ! priv->txInProgress ) {
-		priv->txInProgress = 1;
-		TLAN_DBG( TLAN_DEBUG_TX,
-			  "TRANSMIT:  Starting TX on buffer %d\n", priv->txTail );
-		outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM );
-		outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD );
+	tail_list->c_stat = TLAN_CSTAT_READY;
+	if (!priv->tx_in_progress) {
+		priv->tx_in_progress = 1;
+		TLAN_DBG(TLAN_DEBUG_TX,
+			 "TRANSMIT:  Starting TX on buffer %d\n",
+			 priv->tx_tail);
+		outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
+		outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
 	} else {
-		TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  Adding buffer %d to TX channel\n",
-			  priv->txTail );
-		if ( priv->txTail == 0 ) {
-			( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward
+		TLAN_DBG(TLAN_DEBUG_TX,
+			 "TRANSMIT:  Adding buffer %d to TX channel\n",
+			 priv->tx_tail);
+		if (priv->tx_tail == 0) {
+			(priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
 				= tail_list_phys;
 		} else {
-			( priv->txList + ( priv->txTail - 1 ) )->forward
+			(priv->tx_list + (priv->tx_tail - 1))->forward
 				= tail_list_phys;
 		}
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
 
-	CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
+	CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
 
 	return NETDEV_TX_OK;
 
-} /* TLan_StartTx */
+}
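The Tx path above advances its ring indices with the driver's CIRC_INC() helper; a stand-alone equivalent, written out as an assumption for illustration (the authoritative macro lives in tlan.h), looks like this:

	/* Advance a ring index, wrapping back to 0 at the end of the ring. */
	static inline void example_circ_inc(int *index, int ring_size)
	{
		if (++*index >= ring_size)
			*index = 0;
	}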
 
 
 
 
-	/***************************************************************
-	 *	TLan_HandleInterrupt
-	 *
-	 *	Returns:
-	 *		Nothing
-	 *	Parms:
-	 *		irq	The line on which the interrupt
-	 *			occurred.
-	 *		dev_id	A pointer to the device assigned to
-	 *			this irq line.
-	 *
-	 *	This function handles an interrupt generated by its
-	 *	assigned TLAN adapter.  The function deactivates
-	 *	interrupts on its adapter, records the type of
-	 *	interrupt, executes the appropriate subhandler, and
-	 *	acknowdges the interrupt to the adapter (thus
-	 *	re-enabling adapter interrupts.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_handle_interrupt
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		irq	The line on which the interrupt
+ *			occurred.
+ *		dev_id	A pointer to the device assigned to
+ *			this irq line.
+ *
+ *	This function handles an interrupt generated by its
+ *	assigned TLAN adapter.  The function deactivates
+ *	interrupts on its adapter, records the type of
+ *	interrupt, executes the appropriate subhandler, and
+ *	acknowledges the interrupt to the adapter (thus
+ *	re-enabling adapter interrupts).
+ *
+ **************************************************************/
 
-static irqreturn_t TLan_HandleInterrupt(int irq, void *dev_id)
+static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
 {
 	struct net_device	*dev = dev_id;
-	TLanPrivateInfo *priv = netdev_priv(dev);
+	struct tlan_priv *priv = netdev_priv(dev);
 	u16		host_int;
 	u16		type;
 
 	spin_lock(&priv->lock);
 
-	host_int = inw( dev->base_addr + TLAN_HOST_INT );
-	type = ( host_int & TLAN_HI_IT_MASK ) >> 2;
-	if ( type ) {
+	host_int = inw(dev->base_addr + TLAN_HOST_INT);
+	type = (host_int & TLAN_HI_IT_MASK) >> 2;
+	if (type) {
 		u32	ack;
 		u32	host_cmd;
 
-		outw( host_int, dev->base_addr + TLAN_HOST_INT );
-		ack = TLanIntVector[type]( dev, host_int );
+		outw(host_int, dev->base_addr + TLAN_HOST_INT);
+		ack = tlan_int_vector[type](dev, host_int);
 
-		if ( ack ) {
-			host_cmd = TLAN_HC_ACK | ack | ( type << 18 );
-			outl( host_cmd, dev->base_addr + TLAN_HOST_CMD );
+		if (ack) {
+			host_cmd = TLAN_HC_ACK | ack | (type << 18);
+			outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
 		}
 	}
 
 	spin_unlock(&priv->lock);
 
 	return IRQ_RETVAL(type);
-} /* TLan_HandleInterrupts */
+}
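The interrupt type pulled out of HOST_INT indexes a table of sub-handlers; a sketch of such a table follows (the entry order shown here is an assumption for illustration -- the authoritative table, tlan_int_vector, is defined elsewhere in tlan.c):

	typedef u32 (*example_int_handler)(struct net_device *dev, u16 host_int);

	static example_int_handler example_int_vector[] = {
		NULL,				/* 0: invalid/unused vector    */
		tlan_handle_tx_eof,		/* 1: Tx frame complete        */
		tlan_handle_stat_overflow,	/* 2: statistics half full     */
		tlan_handle_rx_eof,		/* 3: Rx frame complete        */
		tlan_handle_dummy,		/* 4: test interrupt           */
		tlan_handle_tx_eoc,		/* 5: Tx end of channel        */
		tlan_handle_status_check,	/* 6: adapter check/net status */
		tlan_handle_rx_eoc,		/* 7: Rx end of channel        */
	};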
 
 
 
 
-	/***************************************************************
-	 *	TLan_Close
-	 *
-	 * 	Returns:
-	 *		An error code.
-	 *	Parms:
-	 *		dev	The device structure of the device to
-	 *			close.
-	 *
-	 *	This function shuts down the adapter.  It records any
-	 *	stats, puts the adapter into reset state, deactivates
-	 *	its time as needed, and	frees the irq it is using.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_close
+ *
+ *	Returns:
+ *		An error code.
+ *	Parms:
+ *		dev	The device structure of the device to
+ *			close.
+ *
+ *	This function shuts down the adapter.  It records any
+ *	stats, puts the adapter into reset state, deactivates
+ *	its timer as needed, and frees the irq it is using.
+ *
+ **************************************************************/
 
-static int TLan_Close(struct net_device *dev)
+static int tlan_close(struct net_device *dev)
 {
-	TLanPrivateInfo *priv = netdev_priv(dev);
+	struct tlan_priv *priv = netdev_priv(dev);
 
-	netif_stop_queue(dev);
 	priv->neg_be_verbose = 0;
+	tlan_stop(dev);
 
-	TLan_ReadAndClearStats( dev, TLAN_RECORD );
-	outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
-	if ( priv->timer.function != NULL ) {
-		del_timer_sync( &priv->timer );
-		priv->timer.function = NULL;
-	}
-
-	free_irq( dev->irq, dev );
-	TLan_FreeLists( dev );
-	TLAN_DBG( TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name );
+	free_irq(dev->irq, dev);
+	tlan_free_lists(dev);
+	TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);
 
 	return 0;
 
-} /* TLan_Close */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_GetStats
-	 *
-	 *	Returns:
-	 *		A pointer to the device's statistics structure.
-	 *	Parms:
-	 *		dev	The device structure to return the
-	 *			stats for.
-	 *
-	 *	This function updates the devices statistics by reading
-	 *	the TLAN chip's onboard registers.  Then it returns the
-	 *	address of the statistics structure.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_get_stats
+ *
+ *	Returns:
+ *		A pointer to the device's statistics structure.
+ *	Parms:
+ *		dev	The device structure to return the
+ *			stats for.
+ *
+ *	This function updates the device's statistics by reading
+ *	the TLAN chip's onboard registers.  Then it returns the
+ *	address of the statistics structure.
+ *
+ **************************************************************/
 
-static struct net_device_stats *TLan_GetStats( struct net_device *dev )
+static struct net_device_stats *tlan_get_stats(struct net_device *dev)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	int i;
 
 	/* Should only read stats if open ? */
-	TLan_ReadAndClearStats( dev, TLAN_RECORD );
+	tlan_read_and_clear_stats(dev, TLAN_RECORD);
 
-	TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE:  %s EOC count = %d\n", dev->name,
-		  priv->rxEocCount );
-	TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  %s Busy count = %d\n", dev->name,
-		  priv->txBusyCount );
-	if ( debug & TLAN_DEBUG_GNRL ) {
-		TLan_PrintDio( dev->base_addr );
-		TLan_PhyPrint( dev );
+	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE:  %s EOC count = %d\n", dev->name,
+		 priv->rx_eoc_count);
+	TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT:  %s Busy count = %d\n", dev->name,
+		 priv->tx_busy_count);
+	if (debug & TLAN_DEBUG_GNRL) {
+		tlan_print_dio(dev->base_addr);
+		tlan_phy_print(dev);
 	}
-	if ( debug & TLAN_DEBUG_LIST ) {
-		for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ )
-			TLan_PrintList( priv->rxList + i, "RX", i );
-		for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ )
-			TLan_PrintList( priv->txList + i, "TX", i );
+	if (debug & TLAN_DEBUG_LIST) {
+		for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
+			tlan_print_list(priv->rx_list + i, "RX", i);
+		for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
+			tlan_print_list(priv->tx_list + i, "TX", i);
 	}
 
 	return &dev->stats;
 
-} /* TLan_GetStats */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_SetMulticastList
-	 *
-	 *	Returns:
-	 *		Nothing
-	 *	Parms:
-	 *		dev	The device structure to set the
-	 *			multicast list for.
-	 *
-	 *	This function sets the TLAN adaptor to various receive
-	 *	modes.  If the IFF_PROMISC flag is set, promiscuous
-	 *	mode is acitviated.  Otherwise,	promiscuous mode is
-	 *	turned off.  If the IFF_ALLMULTI flag is set, then
-	 *	the hash table is set to receive all group addresses.
-	 *	Otherwise, the first three multicast addresses are
-	 *	stored in AREG_1-3, and the rest are selected via the
-	 *	hash table, as necessary.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_set_multicast_list
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		dev	The device structure to set the
+ *			multicast list for.
+ *
+ *	This function sets the TLAN adapter to various receive
+ *	modes.  If the IFF_PROMISC flag is set, promiscuous
+ *	mode is activated.  Otherwise, promiscuous mode is
+ *	turned off.  If the IFF_ALLMULTI flag is set, then
+ *	the hash table is set to receive all group addresses.
+ *	Otherwise, the first three multicast addresses are
+ *	stored in AREG_1-3, and the rest are selected via the
+ *	hash table, as necessary.
+ *
+ **************************************************************/
 
-static void TLan_SetMulticastList( struct net_device *dev )
+static void tlan_set_multicast_list(struct net_device *dev)
 {
 	struct netdev_hw_addr *ha;
 	u32			hash1 = 0;
@@ -1320,53 +1394,56 @@
 	u32			offset;
 	u8			tmp;
 
-	if ( dev->flags & IFF_PROMISC ) {
-		tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
-		TLan_DioWrite8( dev->base_addr,
-				TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF );
+	if (dev->flags & IFF_PROMISC) {
+		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+		tlan_dio_write8(dev->base_addr,
+				TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
 	} else {
-		tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD );
-		TLan_DioWrite8( dev->base_addr,
-				TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF );
-		if ( dev->flags & IFF_ALLMULTI ) {
-			for ( i = 0; i < 3; i++ )
-				TLan_SetMac( dev, i + 1, NULL );
-			TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
-			TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
+		tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+		tlan_dio_write8(dev->base_addr,
+				TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
+		if (dev->flags & IFF_ALLMULTI) {
+			for (i = 0; i < 3; i++)
+				tlan_set_mac(dev, i + 1, NULL);
+			tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
+					 0xffffffff);
+			tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
+					 0xffffffff);
 		} else {
 			i = 0;
 			netdev_for_each_mc_addr(ha, dev) {
-				if ( i < 3 ) {
-					TLan_SetMac( dev, i + 1,
+				if (i < 3) {
+					tlan_set_mac(dev, i + 1,
 						     (char *) &ha->addr);
 				} else {
-					offset = TLan_HashFunc((u8 *)&ha->addr);
-					if ( offset < 32 )
-						hash1 |= ( 1 << offset );
+					offset =
+						tlan_hash_func((u8 *)&ha->addr);
+					if (offset < 32)
+						hash1 |= (1 << offset);
 					else
-						hash2 |= ( 1 << ( offset - 32 ) );
+						hash2 |= (1 << (offset - 32));
 				}
 				i++;
 			}
-			for ( ; i < 3; i++ )
-				TLan_SetMac( dev, i + 1, NULL );
-			TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, hash1 );
-			TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, hash2 );
+			for ( ; i < 3; i++)
+				tlan_set_mac(dev, i + 1, NULL);
+			tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
+			tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
 		}
 	}
 
-} /* TLan_SetMulticastList */
+}
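To make the hash-register split above explicit: the multicast hash selects one bit out of a 64-bit filter that the hardware exposes as two 32-bit registers (TLAN_HASH_1 and TLAN_HASH_2). A minimal sketch of the bit selection, with an illustrative helper name:

	/* Set the filter bit chosen by a 6-bit hash value (0..63). */
	static void example_set_hash_bit(u32 *hash1, u32 *hash2, unsigned int offset)
	{
		if (offset < 32)
			*hash1 |= 1u << offset;
		else
			*hash2 |= 1u << (offset - 32);
	}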
 
 
 
 /*****************************************************************************
 ******************************************************************************
 
-        ThunderLAN Driver Interrupt Vectors and Table
+ThunderLAN driver interrupt vectors and table
 
-	Please see Chap. 4, "Interrupt Handling" of the "ThunderLAN
-	Programmer's Guide" for more informations on handling interrupts
-	generated by TLAN based adapters.
+please see chap. 4, "Interrupt Handling" of the "ThunderLAN
+Programmer's Guide" for more information on handling interrupts
+generated by TLAN based adapters.
 
 ******************************************************************************
 *****************************************************************************/
@@ -1374,46 +1451,48 @@
 
 
 
-	/***************************************************************
-	 *	TLan_HandleTxEOF
-	 *
-	 *	Returns:
-	 *		1
-	 *	Parms:
-	 *		dev		Device assigned the IRQ that was
-	 *				raised.
-	 *		host_int	The contents of the HOST_INT
-	 *				port.
-	 *
-	 *	This function handles Tx EOF interrupts which are raised
-	 *	by the adapter when it has completed sending the
-	 *	contents of a buffer.  If detemines which list/buffer
-	 *	was completed and resets it.  If the buffer was the last
-	 *	in the channel (EOC), then the function checks to see if
-	 *	another buffer is ready to send, and if so, sends a Tx
-	 *	Go command.  Finally, the driver activates/continues the
-	 *	activity LED.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_handle_tx_eof
+ *
+ *	Returns:
+ *		1
+ *	Parms:
+ *		dev		Device assigned the IRQ that was
+ *				raised.
+ *		host_int	The contents of the HOST_INT
+ *				port.
+ *
+ *	This function handles Tx EOF interrupts which are raised
+ *	by the adapter when it has completed sending the
+ *	contents of a buffer.  It determines which list/buffer
+ *	was completed and resets it.  If the buffer was the last
+ *	in the channel (EOC), then the function checks to see if
+ *	another buffer is ready to send, and if so, sends a Tx
+ *	Go command.  Finally, the driver activates/continues the
+ *	activity LED.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	int		eoc = 0;
-	TLanList	*head_list;
+	struct tlan_list	*head_list;
 	dma_addr_t	head_list_phys;
 	u32		ack = 0;
-	u16		tmpCStat;
+	u16		tmp_c_stat;
 
-	TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT:  Handling TX EOF (Head=%d Tail=%d)\n",
-		  priv->txHead, priv->txTail );
-	head_list = priv->txList + priv->txHead;
+	TLAN_DBG(TLAN_DEBUG_TX,
+		 "TRANSMIT:  Handling TX EOF (Head=%d Tail=%d)\n",
+		 priv->tx_head, priv->tx_tail);
+	head_list = priv->tx_list + priv->tx_head;
 
-	while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
-		struct sk_buff *skb = TLan_GetSKB(head_list);
+	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+	       && (ack < 255)) {
+		struct sk_buff *skb = tlan_get_skb(head_list);
 
 		ack++;
-		pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
+		pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
 				 max(skb->len,
 				     (unsigned int)TLAN_MIN_FRAME_SIZE),
 				 PCI_DMA_TODEVICE);
@@ -1421,304 +1500,313 @@
 		head_list->buffer[8].address = 0;
 		head_list->buffer[9].address = 0;
 
-		if ( tmpCStat & TLAN_CSTAT_EOC )
+		if (tmp_c_stat & TLAN_CSTAT_EOC)
 			eoc = 1;
 
-		dev->stats.tx_bytes += head_list->frameSize;
+		dev->stats.tx_bytes += head_list->frame_size;
 
-		head_list->cStat = TLAN_CSTAT_UNUSED;
+		head_list->c_stat = TLAN_CSTAT_UNUSED;
 		netif_start_queue(dev);
-		CIRC_INC( priv->txHead, TLAN_NUM_TX_LISTS );
-		head_list = priv->txList + priv->txHead;
+		CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
+		head_list = priv->tx_list + priv->tx_head;
 	}
 
 	if (!ack)
-		printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n");
+		printk(KERN_INFO
+		       "TLAN: Received interrupt for uncompleted TX frame.\n");
 
-	if ( eoc ) {
-		TLAN_DBG( TLAN_DEBUG_TX,
-			  "TRANSMIT:  Handling TX EOC (Head=%d Tail=%d)\n",
-			  priv->txHead, priv->txTail );
-		head_list = priv->txList + priv->txHead;
-		head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
-		if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
-			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+	if (eoc) {
+		TLAN_DBG(TLAN_DEBUG_TX,
+			 "TRANSMIT:  handling TX EOC (Head=%d Tail=%d)\n",
+			 priv->tx_head, priv->tx_tail);
+		head_list = priv->tx_list + priv->tx_head;
+		head_list_phys = priv->tx_list_dma
+			+ sizeof(struct tlan_list)*priv->tx_head;
+		if ((head_list->c_stat & TLAN_CSTAT_READY)
+		    == TLAN_CSTAT_READY) {
+			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
 			ack |= TLAN_HC_GO;
 		} else {
-			priv->txInProgress = 0;
+			priv->tx_in_progress = 0;
 		}
 	}
 
-	if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
-		TLan_DioWrite8( dev->base_addr,
-				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
-		if ( priv->timer.function == NULL ) {
-			 priv->timer.function = TLan_Timer;
-			 priv->timer.data = (unsigned long) dev;
-			 priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
-			 priv->timerSetAt = jiffies;
-			 priv->timerType = TLAN_TIMER_ACTIVITY;
-			 add_timer(&priv->timer);
-		} else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
-			priv->timerSetAt = jiffies;
+	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+		tlan_dio_write8(dev->base_addr,
+				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+		if (priv->timer.function == NULL) {
+			priv->timer.function = tlan_timer;
+			priv->timer.data = (unsigned long) dev;
+			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+			priv->timer_set_at = jiffies;
+			priv->timer_type = TLAN_TIMER_ACTIVITY;
+			add_timer(&priv->timer);
+		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+			priv->timer_set_at = jiffies;
 		}
 	}
 
 	return ack;
 
-} /* TLan_HandleTxEOF */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_HandleStatOverflow
-	 *
-	 *	Returns:
-	 *		1
-	 *	Parms:
-	 *		dev		Device assigned the IRQ that was
-	 *				raised.
-	 *		host_int	The contents of the HOST_INT
-	 *				port.
-	 *
-	 *	This function handles the Statistics Overflow interrupt
-	 *	which means that one or more of the TLAN statistics
-	 *	registers has reached 1/2 capacity and needs to be read.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_handle_stat_overflow
+ *
+ *	Returns:
+ *		1
+ *	Parms:
+ *		dev		Device assigned the IRQ that was
+ *				raised.
+ *		host_int	The contents of the HOST_INT
+ *				port.
+ *
+ *	This function handles the Statistics Overflow interrupt
+ *	which means that one or more of the TLAN statistics
+ *	registers has reached 1/2 capacity and needs to be read.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleStatOverflow( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
 {
-	TLan_ReadAndClearStats( dev, TLAN_RECORD );
+	tlan_read_and_clear_stats(dev, TLAN_RECORD);
 
 	return 1;
 
-} /* TLan_HandleStatOverflow */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_HandleRxEOF
-	 *
-	 *	Returns:
-	 *		1
-	 *	Parms:
-	 *		dev		Device assigned the IRQ that was
-	 *				raised.
-	 *		host_int	The contents of the HOST_INT
-	 *				port.
-	 *
-	 *	This function handles the Rx EOF interrupt which
-	 *	indicates a frame has been received by the adapter from
-	 *	the net and the frame has been transferred to memory.
-	 *	The function determines the bounce buffer the frame has
-	 *	been loaded into, creates a new sk_buff big enough to
-	 *	hold the frame, and sends it to protocol stack.  It
-	 *	then resets the used buffer and appends it to the end
-	 *	of the list.  If the frame was the last in the Rx
-	 *	channel (EOC), the function restarts the receive channel
-	 *	by sending an Rx Go command to the adapter.  Then it
-	 *	activates/continues the activity LED.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_handle_rx_eof
+ *
+ *	Returns:
+ *		1
+ *	Parms:
+ *		dev		Device assigned the IRQ that was
+ *				raised.
+ *		host_int	The contents of the HOST_INT
+ *				port.
+ *
+ *	This function handles the Rx EOF interrupt which
+ *	indicates a frame has been received by the adapter from
+ *	the net and the frame has been transferred to memory.
+ *	The function determines the bounce buffer the frame has
+ *	been loaded into, creates a new sk_buff big enough to
+ *	hold the frame, and sends it to the protocol stack.  It
+ *	then resets the used buffer and appends it to the end
+ *	of the list.  If the frame was the last in the Rx
+ *	channel (EOC), the function restarts the receive channel
+ *	by sending an Rx Go command to the adapter.  Then it
+ *	activates/continues the activity LED.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	u32		ack = 0;
 	int		eoc = 0;
-	TLanList	*head_list;
+	struct tlan_list	*head_list;
 	struct sk_buff	*skb;
-	TLanList	*tail_list;
-	u16		tmpCStat;
+	struct tlan_list	*tail_list;
+	u16		tmp_c_stat;
 	dma_addr_t	head_list_phys;
 
-	TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE:  Handling RX EOF (Head=%d Tail=%d)\n",
-		  priv->rxHead, priv->rxTail );
-	head_list = priv->rxList + priv->rxHead;
-	head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+	TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE:  handling RX EOF (Head=%d Tail=%d)\n",
+		 priv->rx_head, priv->rx_tail);
+	head_list = priv->rx_list + priv->rx_head;
+	head_list_phys =
+		priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;
 
-	while (((tmpCStat = head_list->cStat) & TLAN_CSTAT_FRM_CMP) && (ack < 255)) {
-		dma_addr_t frameDma = head_list->buffer[0].address;
-		u32 frameSize = head_list->frameSize;
+	while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+	       && (ack < 255)) {
+		dma_addr_t frame_dma = head_list->buffer[0].address;
+		u32 frame_size = head_list->frame_size;
 		struct sk_buff *new_skb;
 
 		ack++;
-		if (tmpCStat & TLAN_CSTAT_EOC)
+		if (tmp_c_stat & TLAN_CSTAT_EOC)
 			eoc = 1;
 
 		new_skb = netdev_alloc_skb_ip_align(dev,
 						    TLAN_MAX_FRAME_SIZE + 5);
-		if ( !new_skb )
+		if (!new_skb)
 			goto drop_and_reuse;
 
-		skb = TLan_GetSKB(head_list);
-		pci_unmap_single(priv->pciDev, frameDma,
+		skb = tlan_get_skb(head_list);
+		pci_unmap_single(priv->pci_dev, frame_dma,
 				 TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
-		skb_put( skb, frameSize );
+		skb_put(skb, frame_size);
 
-		dev->stats.rx_bytes += frameSize;
+		dev->stats.rx_bytes += frame_size;
 
-		skb->protocol = eth_type_trans( skb, dev );
-		netif_rx( skb );
+		skb->protocol = eth_type_trans(skb, dev);
+		netif_rx(skb);
 
-		head_list->buffer[0].address = pci_map_single(priv->pciDev,
-							      new_skb->data,
-							      TLAN_MAX_FRAME_SIZE,
-							      PCI_DMA_FROMDEVICE);
+		head_list->buffer[0].address =
+			pci_map_single(priv->pci_dev, new_skb->data,
+				       TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
 
-		TLan_StoreSKB(head_list, new_skb);
+		tlan_store_skb(head_list, new_skb);
 drop_and_reuse:
 		head_list->forward = 0;
-		head_list->cStat = 0;
-		tail_list = priv->rxList + priv->rxTail;
+		head_list->c_stat = 0;
+		tail_list = priv->rx_list + priv->rx_tail;
 		tail_list->forward = head_list_phys;
 
-		CIRC_INC( priv->rxHead, TLAN_NUM_RX_LISTS );
-		CIRC_INC( priv->rxTail, TLAN_NUM_RX_LISTS );
-		head_list = priv->rxList + priv->rxHead;
-		head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
+		CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
+		CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
+		head_list = priv->rx_list + priv->rx_head;
+		head_list_phys = priv->rx_list_dma
+			+ sizeof(struct tlan_list)*priv->rx_head;
 	}
 
 	if (!ack)
-		printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n");
+		printk(KERN_INFO
+		       "TLAN: Received interrupt for uncompleted RX frame.\n");
 
 
-	if ( eoc ) {
-		TLAN_DBG( TLAN_DEBUG_RX,
-			  "RECEIVE:  Handling RX EOC (Head=%d Tail=%d)\n",
-			  priv->rxHead, priv->rxTail );
-		head_list = priv->rxList + priv->rxHead;
-		head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
-		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM );
+	if (eoc) {
+		TLAN_DBG(TLAN_DEBUG_RX,
+			 "RECEIVE:  handling RX EOC (Head=%d Tail=%d)\n",
+			 priv->rx_head, priv->rx_tail);
+		head_list = priv->rx_list + priv->rx_head;
+		head_list_phys = priv->rx_list_dma
+			+ sizeof(struct tlan_list)*priv->rx_head;
+		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
 		ack |= TLAN_HC_GO | TLAN_HC_RT;
-		priv->rxEocCount++;
+		priv->rx_eoc_count++;
 	}
 
-	if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) {
-		TLan_DioWrite8( dev->base_addr,
-				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT );
-		if ( priv->timer.function == NULL )  {
-			priv->timer.function = TLan_Timer;
+	if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+		tlan_dio_write8(dev->base_addr,
+				TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+		if (priv->timer.function == NULL)  {
+			priv->timer.function = tlan_timer;
 			priv->timer.data = (unsigned long) dev;
 			priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
-			priv->timerSetAt = jiffies;
-			priv->timerType = TLAN_TIMER_ACTIVITY;
+			priv->timer_set_at = jiffies;
+			priv->timer_type = TLAN_TIMER_ACTIVITY;
 			add_timer(&priv->timer);
-		} else if ( priv->timerType == TLAN_TIMER_ACTIVITY ) {
-			priv->timerSetAt = jiffies;
+		} else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+			priv->timer_set_at = jiffies;
 		}
 	}
 
 	return ack;
 
-} /* TLan_HandleRxEOF */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_HandleDummy
-	 *
-	 *	Returns:
-	 *		1
-	 *	Parms:
-	 *		dev		Device assigned the IRQ that was
-	 *				raised.
-	 *		host_int	The contents of the HOST_INT
-	 *				port.
-	 *
-	 *	This function handles the Dummy interrupt, which is
-	 *	raised whenever a test interrupt is generated by setting
-	 *	the Req_Int bit of HOST_CMD to 1.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_handle_dummy
+ *
+ *	Returns:
+ *		1
+ *	Parms:
+ *		dev		Device assigned the IRQ that was
+ *				raised.
+ *		host_int	The contents of the HOST_INT
+ *				port.
+ *
+ *	This function handles the Dummy interrupt, which is
+ *	raised whenever a test interrupt is generated by setting
+ *	the Req_Int bit of HOST_CMD to 1.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleDummy( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
 {
-	printk( "TLAN:  Test interrupt on %s.\n", dev->name );
+	pr_info("TLAN:  Test interrupt on %s.\n", dev->name);
 	return 1;
 
-} /* TLan_HandleDummy */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_HandleTxEOC
-	 *
-	 *	Returns:
-	 *		1
-	 *	Parms:
-	 *		dev		Device assigned the IRQ that was
-	 *				raised.
-	 *		host_int	The contents of the HOST_INT
-	 *				port.
-	 *
-	 *	This driver is structured to determine EOC occurrences by
-	 *	reading the CSTAT member of the list structure.  Tx EOC
-	 *	interrupts are disabled via the DIO INTDIS register.
-	 *	However, TLAN chips before revision 3.0 didn't have this
-	 *	functionality, so process EOC events if this is the
-	 *	case.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_handle_tx_eoc
+ *
+ *	Returns:
+ *		1
+ *	Parms:
+ *		dev		Device assigned the IRQ that was
+ *				raised.
+ *		host_int	The contents of the HOST_INT
+ *				port.
+ *
+ *	This driver is structured to determine EOC occurrences by
+ *	reading the CSTAT member of the list structure.  Tx EOC
+ *	interrupts are disabled via the DIO INTDIS register.
+ *	However, TLAN chips before revision 3.0 didn't have this
+ *	functionality, so process EOC events if this is the
+ *	case.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleTxEOC( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
-	TLanList		*head_list;
+	struct tlan_priv	*priv = netdev_priv(dev);
+	struct tlan_list		*head_list;
 	dma_addr_t		head_list_phys;
 	u32			ack = 1;
 
 	host_int = 0;
-	if ( priv->tlanRev < 0x30 ) {
-		TLAN_DBG( TLAN_DEBUG_TX,
-			  "TRANSMIT:  Handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
-			  priv->txHead, priv->txTail );
-		head_list = priv->txList + priv->txHead;
-		head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead;
-		if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) {
+	if (priv->tlan_rev < 0x30) {
+		TLAN_DBG(TLAN_DEBUG_TX,
+			 "TRANSMIT:  handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
+			 priv->tx_head, priv->tx_tail);
+		head_list = priv->tx_list + priv->tx_head;
+		head_list_phys = priv->tx_list_dma
+			+ sizeof(struct tlan_list)*priv->tx_head;
+		if ((head_list->c_stat & TLAN_CSTAT_READY)
+		    == TLAN_CSTAT_READY) {
 			netif_stop_queue(dev);
-			outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+			outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
 			ack |= TLAN_HC_GO;
 		} else {
-			priv->txInProgress = 0;
+			priv->tx_in_progress = 0;
 		}
 	}
 
 	return ack;
 
-} /* TLan_HandleTxEOC */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_HandleStatusCheck
-	 *
-	 *	Returns:
-	 *		0 if Adapter check, 1 if Network Status check.
-	 *	Parms:
-	 *		dev		Device assigned the IRQ that was
-	 *				raised.
-	 *		host_int	The contents of the HOST_INT
-	 *				port.
-	 *
-	 *	This function handles Adapter Check/Network Status
-	 *	interrupts generated by the adapter.  It checks the
-	 *	vector in the HOST_INT register to determine if it is
-	 *	an Adapter Check interrupt.  If so, it resets the
-	 *	adapter.  Otherwise it clears the status registers
-	 *	and services the PHY.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_handle_status_check
+ *
+ *	Returns:
+ *		0 if Adapter check, 1 if Network Status check.
+ *	Parms:
+ *		dev		Device assigned the IRQ that was
+ *				raised.
+ *		host_int	The contents of the HOST_INT
+ *				port.
+ *
+ *	This function handles Adapter Check/Network Status
+ *	interrupts generated by the adapter.  It checks the
+ *	vector in the HOST_INT register to determine if it is
+ *	an Adapter Check interrupt.  If so, it resets the
+ *	adapter.  Otherwise it clears the status registers
+ *	and services the PHY.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleStatusCheck( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	u32		ack;
 	u32		error;
 	u8		net_sts;
@@ -1727,92 +1815,94 @@
 	u16		tlphy_sts;
 
 	ack = 1;
-	if ( host_int & TLAN_HI_IV_MASK ) {
-		netif_stop_queue( dev );
-		error = inl( dev->base_addr + TLAN_CH_PARM );
-		printk( "TLAN:  %s: Adaptor Error = 0x%x\n", dev->name, error );
-		TLan_ReadAndClearStats( dev, TLAN_RECORD );
-		outl( TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD );
+	if (host_int & TLAN_HI_IV_MASK) {
+		netif_stop_queue(dev);
+		error = inl(dev->base_addr + TLAN_CH_PARM);
+		pr_info("TLAN:  %s: Adaptor Error = 0x%x\n", dev->name, error);
+		tlan_read_and_clear_stats(dev, TLAN_RECORD);
+		outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
 
 		schedule_work(&priv->tlan_tqueue);
 
 		netif_wake_queue(dev);
 		ack = 0;
 	} else {
-		TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name );
-		phy = priv->phy[priv->phyNum];
+		TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
+		phy = priv->phy[priv->phy_num];
 
-		net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS );
-		if ( net_sts ) {
-			TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts );
-			TLAN_DBG( TLAN_DEBUG_GNRL, "%s:    Net_Sts = %x\n",
-				  dev->name, (unsigned) net_sts );
+		net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
+		if (net_sts) {
+			tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
+			TLAN_DBG(TLAN_DEBUG_GNRL, "%s:    Net_Sts = %x\n",
+				 dev->name, (unsigned) net_sts);
 		}
-		if ( ( net_sts & TLAN_NET_STS_MIRQ ) &&  ( priv->phyNum == 0 ) ) {
-			TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts );
-			TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
-        		if ( ! ( tlphy_sts & TLAN_TS_POLOK ) &&
-			     ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
-                		tlphy_ctl |= TLAN_TC_SWAPOL;
-                		TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
-			} else if ( ( tlphy_sts & TLAN_TS_POLOK ) &&
-				    ( tlphy_ctl & TLAN_TC_SWAPOL ) ) {
-                		tlphy_ctl &= ~TLAN_TC_SWAPOL;
-                		TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
-        		}
-
-			if (debug) {
-				TLan_PhyPrint( dev );
+		if ((net_sts & TLAN_NET_STS_MIRQ) &&  (priv->phy_num == 0)) {
+			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
+			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+			if (!(tlphy_sts & TLAN_TS_POLOK) &&
+			    !(tlphy_ctl & TLAN_TC_SWAPOL)) {
+				tlphy_ctl |= TLAN_TC_SWAPOL;
+				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+						   tlphy_ctl);
+			} else if ((tlphy_sts & TLAN_TS_POLOK) &&
+				   (tlphy_ctl & TLAN_TC_SWAPOL)) {
+				tlphy_ctl &= ~TLAN_TC_SWAPOL;
+				tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+						   tlphy_ctl);
 			}
+
+			if (debug)
+				tlan_phy_print(dev);
 		}
 	}
 
 	return ack;
 
-} /* TLan_HandleStatusCheck */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_HandleRxEOC
-	 *
-	 *	Returns:
-	 *		1
-	 *	Parms:
-	 *		dev		Device assigned the IRQ that was
-	 *				raised.
-	 *		host_int	The contents of the HOST_INT
-	 *				port.
-	 *
-	 *	This driver is structured to determine EOC occurrences by
-	 *	reading the CSTAT member of the list structure.  Rx EOC
-	 *	interrupts are disabled via the DIO INTDIS register.
-	 *	However, TLAN chips before revision 3.0 didn't have this
-	 *	CSTAT member or a INTDIS register, so if this chip is
-	 *	pre-3.0, process EOC interrupts normally.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_handle_rx_eoc
+ *
+ *	Returns:
+ *		1
+ *	Parms:
+ *		dev		Device assigned the IRQ that was
+ *				raised.
+ *		host_int	The contents of the HOST_INT
+ *				port.
+ *
+ *	This driver is structured to determine EOC occurrences by
+ *	reading the CSTAT member of the list structure.  Rx EOC
+ *	interrupts are disabled via the DIO INTDIS register.
+ *	However, TLAN chips before revision 3.0 didn't have this
+ *	CSTAT member or an INTDIS register, so if this chip is
+ *	pre-3.0, process EOC interrupts normally.
+ *
+ **************************************************************/
 
-static u32 TLan_HandleRxEOC( struct net_device *dev, u16 host_int )
+static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	dma_addr_t	head_list_phys;
 	u32		ack = 1;
 
-	if (  priv->tlanRev < 0x30 ) {
-		TLAN_DBG( TLAN_DEBUG_RX,
-			  "RECEIVE:  Handling RX EOC (Head=%d Tail=%d) -- IRQ\n",
-			  priv->rxHead, priv->rxTail );
-		head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead;
-		outl( head_list_phys, dev->base_addr + TLAN_CH_PARM );
+	if (priv->tlan_rev < 0x30) {
+		TLAN_DBG(TLAN_DEBUG_RX,
+			 "RECEIVE:  Handling RX EOC (head=%d tail=%d) -- IRQ\n",
+			 priv->rx_head, priv->rx_tail);
+		head_list_phys = priv->rx_list_dma
+			+ sizeof(struct tlan_list)*priv->rx_head;
+		outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
 		ack |= TLAN_HC_GO | TLAN_HC_RT;
-		priv->rxEocCount++;
+		priv->rx_eoc_count++;
 	}
 
 	return ack;
 
-} /* TLan_HandleRxEOC */
+}
 
 
 
@@ -1820,98 +1910,98 @@
 /*****************************************************************************
 ******************************************************************************
 
-	ThunderLAN Driver Timer Function
+ThunderLAN driver timer function
 
 ******************************************************************************
 *****************************************************************************/
 
 
-	/***************************************************************
-	 *	TLan_Timer
-	 *
-	 *	Returns:
-	 *		Nothing
-	 *	Parms:
-	 *		data	A value given to add timer when
-	 *			add_timer was called.
-	 *
-	 *	This function handles timed functionality for the
-	 *	TLAN driver.  The two current timer uses are for
-	 *	delaying for autonegotionation and driving the ACT LED.
-	 *	-	Autonegotiation requires being allowed about
-	 *		2 1/2 seconds before attempting to transmit a
-	 *		packet.  It would be a very bad thing to hang
-	 *		the kernel this long, so the driver doesn't
-	 *		allow transmission 'til after this time, for
-	 *		certain PHYs.  It would be much nicer if all
-	 *		PHYs were interrupt-capable like the internal
-	 *		PHY.
-	 *	-	The ACT LED, which shows adapter activity, is
-	 *		driven by the driver, and so must be left on
-	 *		for a short period to power up the LED so it
-	 *		can be seen.  This delay can be changed by
-	 *		changing the TLAN_TIMER_ACT_DELAY in tlan.h,
-	 *		if desired.  100 ms  produces a slightly
-	 *		sluggish response.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_timer
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		data	A value given to add timer when
+ *			add_timer was called.
+ *
+ *	This function handles timed functionality for the
+ *	TLAN driver.  The two current timer uses are for
+ *	delaying for autonegotiation and driving the ACT LED.
+ *	-	Autonegotiation requires being allowed about
+ *		2 1/2 seconds before attempting to transmit a
+ *		packet.  It would be a very bad thing to hang
+ *		the kernel this long, so the driver doesn't
+ *		allow transmission 'til after this time, for
+ *		certain PHYs.  It would be much nicer if all
+ *		PHYs were interrupt-capable like the internal
+ *		PHY.
+ *	-	The ACT LED, which shows adapter activity, is
+ *		driven by the driver, and so must be left on
+ *		for a short period to power up the LED so it
+ *		can be seen.  This delay can be changed by
+ *		changing the TLAN_TIMER_ACT_DELAY in tlan.h,
+ *		if desired.  100 ms  produces a slightly
+ *		sluggish response.
+ *
+ **************************************************************/
 
-static void TLan_Timer( unsigned long data )
+static void tlan_timer(unsigned long data)
 {
 	struct net_device	*dev = (struct net_device *) data;
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	u32		elapsed;
 	unsigned long	flags = 0;
 
 	priv->timer.function = NULL;
 
-	switch ( priv->timerType ) {
+	switch (priv->timer_type) {
 #ifdef MONITOR
-		case TLAN_TIMER_LINK_BEAT:
-			TLan_PhyMonitor( dev );
-			break;
+	case TLAN_TIMER_LINK_BEAT:
+		tlan_phy_monitor(dev);
+		break;
 #endif
-		case TLAN_TIMER_PHY_PDOWN:
-			TLan_PhyPowerDown( dev );
-			break;
-		case TLAN_TIMER_PHY_PUP:
-			TLan_PhyPowerUp( dev );
-			break;
-		case TLAN_TIMER_PHY_RESET:
-			TLan_PhyReset( dev );
-			break;
-		case TLAN_TIMER_PHY_START_LINK:
-			TLan_PhyStartLink( dev );
-			break;
-		case TLAN_TIMER_PHY_FINISH_AN:
-			TLan_PhyFinishAutoNeg( dev );
-			break;
-		case TLAN_TIMER_FINISH_RESET:
-			TLan_FinishReset( dev );
-			break;
-		case TLAN_TIMER_ACTIVITY:
-			spin_lock_irqsave(&priv->lock, flags);
-			if ( priv->timer.function == NULL ) {
-				elapsed = jiffies - priv->timerSetAt;
-				if ( elapsed >= TLAN_TIMER_ACT_DELAY ) {
-					TLan_DioWrite8( dev->base_addr,
-							TLAN_LED_REG, TLAN_LED_LINK );
-				} else  {
-					priv->timer.function = TLan_Timer;
-					priv->timer.expires = priv->timerSetAt
-						+ TLAN_TIMER_ACT_DELAY;
-					spin_unlock_irqrestore(&priv->lock, flags);
-					add_timer( &priv->timer );
-					break;
-				}
+	case TLAN_TIMER_PHY_PDOWN:
+		tlan_phy_power_down(dev);
+		break;
+	case TLAN_TIMER_PHY_PUP:
+		tlan_phy_power_up(dev);
+		break;
+	case TLAN_TIMER_PHY_RESET:
+		tlan_phy_reset(dev);
+		break;
+	case TLAN_TIMER_PHY_START_LINK:
+		tlan_phy_start_link(dev);
+		break;
+	case TLAN_TIMER_PHY_FINISH_AN:
+		tlan_phy_finish_auto_neg(dev);
+		break;
+	case TLAN_TIMER_FINISH_RESET:
+		tlan_finish_reset(dev);
+		break;
+	case TLAN_TIMER_ACTIVITY:
+		spin_lock_irqsave(&priv->lock, flags);
+		if (priv->timer.function == NULL) {
+			elapsed = jiffies - priv->timer_set_at;
+			if (elapsed >= TLAN_TIMER_ACT_DELAY) {
+				tlan_dio_write8(dev->base_addr,
+						TLAN_LED_REG, TLAN_LED_LINK);
+			} else  {
+				priv->timer.function = tlan_timer;
+				priv->timer.expires = priv->timer_set_at
+					+ TLAN_TIMER_ACT_DELAY;
+				spin_unlock_irqrestore(&priv->lock, flags);
+				add_timer(&priv->timer);
+				break;
 			}
-			spin_unlock_irqrestore(&priv->lock, flags);
-			break;
-		default:
-			break;
+		}
+		spin_unlock_irqrestore(&priv->lock, flags);
+		break;
+	default:
+		break;
 	}
 
-} /* TLan_Timer */
+}
 
 
 
@@ -1919,39 +2009,39 @@
 /*****************************************************************************
 ******************************************************************************
 
-	ThunderLAN Driver Adapter Related Routines
+ThunderLAN driver adapter related routines
 
 ******************************************************************************
 *****************************************************************************/
 
 
-	/***************************************************************
-	 *	TLan_ResetLists
-	 *
-	 *	Returns:
-	 *		Nothing
-	 *	Parms:
-	 *		dev	The device structure with the list
-	 *			stuctures to be reset.
-	 *
-	 *	This routine sets the variables associated with managing
-	 *	the TLAN lists to their initial values.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_reset_lists
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		dev	The device structure with the list
+ *			structures to be reset.
+ *
+ *	This routine sets the variables associated with managing
+ *	the TLAN lists to their initial values.
+ *
+ **************************************************************/
 
-static void TLan_ResetLists( struct net_device *dev )
+static void tlan_reset_lists(struct net_device *dev)
 {
-	TLanPrivateInfo *priv = netdev_priv(dev);
+	struct tlan_priv *priv = netdev_priv(dev);
 	int		i;
-	TLanList	*list;
+	struct tlan_list	*list;
 	dma_addr_t	list_phys;
 	struct sk_buff	*skb;
 
-	priv->txHead = 0;
-	priv->txTail = 0;
-	for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
-		list = priv->txList + i;
-		list->cStat = TLAN_CSTAT_UNUSED;
+	priv->tx_head = 0;
+	priv->tx_tail = 0;
+	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+		list = priv->tx_list + i;
+		list->c_stat = TLAN_CSTAT_UNUSED;
 		list->buffer[0].address = 0;
 		list->buffer[2].count = 0;
 		list->buffer[2].address = 0;
@@ -1959,169 +2049,169 @@
 		list->buffer[9].address = 0;
 	}
 
-	priv->rxHead = 0;
-	priv->rxTail = TLAN_NUM_RX_LISTS - 1;
-	for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
-		list = priv->rxList + i;
-		list_phys = priv->rxListDMA + sizeof(TLanList) * i;
-		list->cStat = TLAN_CSTAT_READY;
-		list->frameSize = TLAN_MAX_FRAME_SIZE;
+	priv->rx_head = 0;
+	priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
+	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+		list = priv->rx_list + i;
+		list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
+		list->c_stat = TLAN_CSTAT_READY;
+		list->frame_size = TLAN_MAX_FRAME_SIZE;
 		list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
 		skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
-		if ( !skb ) {
-			pr_err("TLAN: out of memory for received data.\n" );
+		if (!skb) {
+			pr_err("TLAN: out of memory for received data.\n");
 			break;
 		}
 
-		list->buffer[0].address = pci_map_single(priv->pciDev,
+		list->buffer[0].address = pci_map_single(priv->pci_dev,
 							 skb->data,
 							 TLAN_MAX_FRAME_SIZE,
 							 PCI_DMA_FROMDEVICE);
-		TLan_StoreSKB(list, skb);
+		tlan_store_skb(list, skb);
 		list->buffer[1].count = 0;
 		list->buffer[1].address = 0;
-		list->forward = list_phys + sizeof(TLanList);
+		list->forward = list_phys + sizeof(struct tlan_list);
 	}
 
 	/* in case ran out of memory early, clear bits */
 	while (i < TLAN_NUM_RX_LISTS) {
-		TLan_StoreSKB(priv->rxList + i, NULL);
+		tlan_store_skb(priv->rx_list + i, NULL);
 		++i;
 	}
 	list->forward = 0;
 
-} /* TLan_ResetLists */
+}
 
 
-static void TLan_FreeLists( struct net_device *dev )
+static void tlan_free_lists(struct net_device *dev)
 {
-	TLanPrivateInfo *priv = netdev_priv(dev);
+	struct tlan_priv *priv = netdev_priv(dev);
 	int		i;
-	TLanList	*list;
+	struct tlan_list	*list;
 	struct sk_buff	*skb;
 
-	for ( i = 0; i < TLAN_NUM_TX_LISTS; i++ ) {
-		list = priv->txList + i;
-		skb = TLan_GetSKB(list);
-		if ( skb ) {
+	for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+		list = priv->tx_list + i;
+		skb = tlan_get_skb(list);
+		if (skb) {
 			pci_unmap_single(
-				priv->pciDev,
+				priv->pci_dev,
 				list->buffer[0].address,
 				max(skb->len,
 				    (unsigned int)TLAN_MIN_FRAME_SIZE),
 				PCI_DMA_TODEVICE);
-			dev_kfree_skb_any( skb );
+			dev_kfree_skb_any(skb);
 			list->buffer[8].address = 0;
 			list->buffer[9].address = 0;
 		}
 	}
 
-	for ( i = 0; i < TLAN_NUM_RX_LISTS; i++ ) {
-		list = priv->rxList + i;
-		skb = TLan_GetSKB(list);
-		if ( skb ) {
-			pci_unmap_single(priv->pciDev,
+	for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+		list = priv->rx_list + i;
+		skb = tlan_get_skb(list);
+		if (skb) {
+			pci_unmap_single(priv->pci_dev,
 					 list->buffer[0].address,
 					 TLAN_MAX_FRAME_SIZE,
 					 PCI_DMA_FROMDEVICE);
-			dev_kfree_skb_any( skb );
+			dev_kfree_skb_any(skb);
 			list->buffer[8].address = 0;
 			list->buffer[9].address = 0;
 		}
 	}
-} /* TLan_FreeLists */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_PrintDio
-	 *
-	 *	Returns:
-	 *		Nothing
-	 *	Parms:
-	 *		io_base		Base IO port of the device of
-	 *				which to print DIO registers.
-	 *
-	 *	This function prints out all the internal (DIO)
-	 *	registers of a TLAN chip.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_print_dio
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		io_base		Base IO port of the device of
+ *				which to print DIO registers.
+ *
+ *	This function prints out all the internal (DIO)
+ *	registers of a TLAN chip.
+ *
+ **************************************************************/
 
-static void TLan_PrintDio( u16 io_base )
+static void tlan_print_dio(u16 io_base)
 {
 	u32 data0, data1;
 	int	i;
 
-	printk( "TLAN:   Contents of internal registers for io base 0x%04hx.\n",
-		io_base );
-	printk( "TLAN:      Off.  +0         +4\n" );
-	for ( i = 0; i < 0x4C; i+= 8 ) {
-		data0 = TLan_DioRead32( io_base, i );
-		data1 = TLan_DioRead32( io_base, i + 0x4 );
-		printk( "TLAN:      0x%02x  0x%08x 0x%08x\n", i, data0, data1 );
+	pr_info("TLAN:   Contents of internal registers for io base 0x%04hx.\n",
+	       io_base);
+	pr_info("TLAN:      Off.  +0         +4\n");
+	for (i = 0; i < 0x4C; i += 8) {
+		data0 = tlan_dio_read32(io_base, i);
+		data1 = tlan_dio_read32(io_base, i + 0x4);
+		pr_info("TLAN:      0x%02x  0x%08x 0x%08x\n", i, data0, data1);
 	}
 
-} /* TLan_PrintDio */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_PrintList
-	 *
-	 *	Returns:
-	 *		Nothing
-	 *	Parms:
-	 *		list	A pointer to the TLanList structure to
-	 *			be printed.
-	 *		type	A string to designate type of list,
-	 *			"Rx" or "Tx".
-	 *		num	The index of the list.
-	 *
-	 *	This function prints out the contents of the list
-	 *	pointed to by the list parameter.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_print_list
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		list	A pointer to the struct tlan_list structure to
+ *			be printed.
+ *		type	A string to designate type of list,
+ *			"Rx" or "Tx".
+ *		num	The index of the list.
+ *
+ *	This function prints out the contents of the list
+ *	pointed to by the list parameter.
+ *
+ **************************************************************/
 
-static void TLan_PrintList( TLanList *list, char *type, int num)
+static void tlan_print_list(struct tlan_list *list, char *type, int num)
 {
 	int i;
 
-	printk( "TLAN:   %s List %d at %p\n", type, num, list );
-	printk( "TLAN:      Forward    = 0x%08x\n",  list->forward );
-	printk( "TLAN:      CSTAT      = 0x%04hx\n", list->cStat );
-	printk( "TLAN:      Frame Size = 0x%04hx\n", list->frameSize );
-	/* for ( i = 0; i < 10; i++ ) { */
-	for ( i = 0; i < 2; i++ ) {
-		printk( "TLAN:      Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
-			i, list->buffer[i].count, list->buffer[i].address );
+	pr_info("TLAN:   %s List %d at %p\n", type, num, list);
+	pr_info("TLAN:      Forward    = 0x%08x\n",  list->forward);
+	pr_info("TLAN:      CSTAT      = 0x%04hx\n", list->c_stat);
+	pr_info("TLAN:      Frame Size = 0x%04hx\n", list->frame_size);
+	/* for (i = 0; i < 10; i++) { */
+	for (i = 0; i < 2; i++) {
+		pr_info("TLAN:      Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
+		       i, list->buffer[i].count, list->buffer[i].address);
 	}
 
-} /* TLan_PrintList */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_ReadAndClearStats
-	 *
-	 *	Returns:
-	 *		Nothing
-	 *	Parms:
-	 *		dev	Pointer to device structure of adapter
-	 *			to which to read stats.
-	 *		record	Flag indicating whether to add
-	 *
-	 *	This functions reads all the internal status registers
-	 *	of the TLAN chip, which clears them as a side effect.
-	 *	It then either adds the values to the device's status
-	 *	struct, or discards them, depending on whether record
-	 *	is TLAN_RECORD (!=0)  or TLAN_IGNORE (==0).
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_read_and_clear_stats
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		dev	Pointer to device structure of adapter
+ *			from which to read stats.
+ *		record	Flag indicating whether to record the values.
+ *
+ *	This function reads all the internal status registers
+ *	of the TLAN chip, which clears them as a side effect.
+ *	It then either adds the values to the device's status
+ *	struct, or discards them, depending on whether record
+ *	is TLAN_RECORD (!=0)  or TLAN_IGNORE (==0).
+ *
+ **************************************************************/
 
-static void TLan_ReadAndClearStats( struct net_device *dev, int record )
+static void tlan_read_and_clear_stats(struct net_device *dev, int record)
 {
 	u32		tx_good, tx_under;
 	u32		rx_good, rx_over;
@@ -2129,41 +2219,42 @@
 	u32		multi_col, single_col;
 	u32		excess_col, late_col, loss;
 
-	outw( TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR );
-	tx_good  = inb( dev->base_addr + TLAN_DIO_DATA );
-	tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
-	tx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
-	tx_under = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
+	outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+	tx_good  = inb(dev->base_addr + TLAN_DIO_DATA);
+	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+	tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+	tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);
 
-	outw( TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR );
-	rx_good  = inb( dev->base_addr + TLAN_DIO_DATA );
-	rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
-	rx_good += inb( dev->base_addr + TLAN_DIO_DATA + 2 ) << 16;
-	rx_over  = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
+	outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+	rx_good  = inb(dev->base_addr + TLAN_DIO_DATA);
+	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+	rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+	rx_over  = inb(dev->base_addr + TLAN_DIO_DATA + 3);
 
-	outw( TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR );
-	def_tx  = inb( dev->base_addr + TLAN_DIO_DATA );
-	def_tx += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
-	crc     = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
-	code    = inb( dev->base_addr + TLAN_DIO_DATA + 3 );
+	outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
+	def_tx  = inb(dev->base_addr + TLAN_DIO_DATA);
+	def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+	crc     = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+	code    = inb(dev->base_addr + TLAN_DIO_DATA + 3);
 
-	outw( TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
-	multi_col   = inb( dev->base_addr + TLAN_DIO_DATA );
-	multi_col  += inb( dev->base_addr + TLAN_DIO_DATA + 1 ) << 8;
-	single_col  = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
-	single_col += inb( dev->base_addr + TLAN_DIO_DATA + 3 ) << 8;
+	outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+	multi_col   = inb(dev->base_addr + TLAN_DIO_DATA);
+	multi_col  += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+	single_col  = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+	single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;
 
-	outw( TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR );
-	excess_col = inb( dev->base_addr + TLAN_DIO_DATA );
-	late_col   = inb( dev->base_addr + TLAN_DIO_DATA + 1 );
-	loss       = inb( dev->base_addr + TLAN_DIO_DATA + 2 );
+	outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+	excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
+	late_col   = inb(dev->base_addr + TLAN_DIO_DATA + 1);
+	loss       = inb(dev->base_addr + TLAN_DIO_DATA + 2);
 
-	if ( record ) {
+	if (record) {
 		dev->stats.rx_packets += rx_good;
 		dev->stats.rx_errors  += rx_over + crc + code;
 		dev->stats.tx_packets += tx_good;
 		dev->stats.tx_errors  += tx_under + loss;
-		dev->stats.collisions += multi_col + single_col + excess_col + late_col;
+		dev->stats.collisions += multi_col
+			+ single_col + excess_col + late_col;
 
 		dev->stats.rx_over_errors    += rx_over;
 		dev->stats.rx_crc_errors     += crc;
@@ -2173,39 +2264,39 @@
 		dev->stats.tx_carrier_errors += loss;
 	}
 
-} /* TLan_ReadAndClearStats */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_Reset
-	 *
-	 *	Returns:
-	 *		0
-	 *	Parms:
-	 *		dev	Pointer to device structure of adapter
-	 *			to be reset.
-	 *
-	 *	This function resets the adapter and it's physical
-	 *	device.  See Chap. 3, pp. 9-10 of the "ThunderLAN
-	 *	Programmer's Guide" for details.  The routine tries to
-	 *	implement what is detailed there, though adjustments
-	 *	have been made.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_reset_adapter
+ *
+ *	Returns:
+ *		0
+ *	Parms:
+ *		dev	Pointer to device structure of adapter
+ *			to be reset.
+ *
+ *	This function resets the adapter and its physical
+ *	device.  See Chap. 3, pp. 9-10 of the "ThunderLAN
+ *	Programmer's Guide" for details.  The routine tries to
+ *	implement what is detailed there, though adjustments
+ *	have been made.
+ *
+ **************************************************************/
 
 static void
-TLan_ResetAdapter( struct net_device *dev )
+tlan_reset_adapter(struct net_device *dev)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	int		i;
 	u32		addr;
 	u32		data;
 	u8		data8;
 
-	priv->tlanFullDuplex = false;
-	priv->phyOnline=0;
+	priv->tlan_full_duplex = false;
+	priv->phy_online = 0;
 	netif_carrier_off(dev);
 
 /*  1.	Assert reset bit. */
@@ -2216,7 +2307,7 @@
 
 	udelay(1000);
 
-/*  2.	Turn off interrupts. ( Probably isn't necessary ) */
+/*  2.	Turn off interrupts. (Probably isn't necessary) */
 
 	data = inl(dev->base_addr + TLAN_HOST_CMD);
 	data |= TLAN_HC_INT_OFF;
@@ -2224,207 +2315,208 @@
 
 /*  3.	Clear AREGs and HASHs. */
 
- 	for ( i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4 ) {
-		TLan_DioWrite32( dev->base_addr, (u16) i, 0 );
-	}
+	for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
+		tlan_dio_write32(dev->base_addr, (u16) i, 0);
 
 /*  4.	Setup NetConfig register. */
 
 	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
-	TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
 
 /*  5.	Load Ld_Tmr and Ld_Thr in HOST_CMD. */
 
- 	outl( TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD );
- 	outl( TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD );
+	outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
+	outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);
 
 /*  6.	Unreset the MII by setting NMRST (in NetSio) to 1. */
 
-	outw( TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR );
+	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
 	addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
-	TLan_SetBit( TLAN_NET_SIO_NMRST, addr );
+	tlan_set_bit(TLAN_NET_SIO_NMRST, addr);
 
 /*  7.	Setup the remaining registers. */
 
-	if ( priv->tlanRev >= 0x30 ) {
+	if (priv->tlan_rev >= 0x30) {
 		data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
-		TLan_DioWrite8( dev->base_addr, TLAN_INT_DIS, data8 );
+		tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
 	}
-	TLan_PhyDetect( dev );
+	tlan_phy_detect(dev);
 	data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
 
-	if ( priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY ) {
+	if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
 		data |= TLAN_NET_CFG_BIT;
-		if ( priv->aui == 1 ) {
-			TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
-		} else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
-			TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
-			priv->tlanFullDuplex = true;
+		if (priv->aui == 1) {
+			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
+		} else if (priv->duplex == TLAN_DUPLEX_FULL) {
+			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
+			priv->tlan_full_duplex = true;
 		} else {
-			TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
+			tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
 		}
 	}
 
-	if ( priv->phyNum == 0 ) {
+	if (priv->phy_num == 0)
 		data |= TLAN_NET_CFG_PHY_EN;
-	}
-	TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, (u16) data );
+	tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
 
-	if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
-		TLan_FinishReset( dev );
-	} else {
-		TLan_PhyPowerDown( dev );
-	}
+	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
+		tlan_finish_reset(dev);
+	else
+		tlan_phy_power_down(dev);
 
-} /* TLan_ResetAdapter */
+}
 
 
 
 
 static void
-TLan_FinishReset( struct net_device *dev )
+tlan_finish_reset(struct net_device *dev)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	u8		data;
 	u32		phy;
 	u8		sio;
 	u16		status;
 	u16		partner;
 	u16		tlphy_ctl;
-	u16 		tlphy_par;
+	u16		tlphy_par;
 	u16		tlphy_id1, tlphy_id2;
-	int 		i;
+	int		i;
 
-	phy = priv->phy[priv->phyNum];
+	phy = priv->phy[priv->phy_num];
 
 	data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
-	if ( priv->tlanFullDuplex ) {
+	if (priv->tlan_full_duplex)
 		data |= TLAN_NET_CMD_DUPLEX;
-	}
-	TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, data );
+	tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
 	data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
-	if ( priv->phyNum == 0 ) {
+	if (priv->phy_num == 0)
 		data |= TLAN_NET_MASK_MASK7;
-	}
-	TLan_DioWrite8( dev->base_addr, TLAN_NET_MASK, data );
-	TLan_DioWrite16( dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7 );
-	TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 );
-	TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 );
+	tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
+	tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
+	tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
+	tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);
 
-	if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) ||
-	     ( priv->aui ) ) {
+	if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
+	    (priv->aui)) {
 		status = MII_GS_LINK;
-		printk( "TLAN:  %s: Link forced.\n", dev->name );
+		pr_info("TLAN:  %s: Link forced.\n", dev->name);
 	} else {
-		TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
-		udelay( 1000 );
-		TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
-		if ( (status & MII_GS_LINK) &&
-		     /* We only support link info on Nat.Sem. PHY's */
-			(tlphy_id1 == NAT_SEM_ID1) &&
-			(tlphy_id2 == NAT_SEM_ID2) ) {
-			TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner );
-			TLan_MiiReadReg( dev, phy, TLAN_TLPHY_PAR, &tlphy_par );
+		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+		udelay(1000);
+		tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+		if ((status & MII_GS_LINK) &&
+		    /* We only support link info on Nat.Sem. PHY's */
+		    (tlphy_id1 == NAT_SEM_ID1) &&
+		    (tlphy_id2 == NAT_SEM_ID2)) {
+			tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
+			tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);
 
-			printk( "TLAN: %s: Link active with ", dev->name );
+			pr_info("TLAN: %s: Link active with ", dev->name);
 			if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) {
-			      	 printk( "forced 10%sMbps %s-Duplex\n",
-					 tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
-					 tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
+				pr_info("forced 10%sMbps %s-Duplex\n",
+					tlphy_par & TLAN_PHY_SPEED_100
+					? "" : "0",
+					tlphy_par & TLAN_PHY_DUPLEX_FULL
+					? "Full" : "Half");
 			} else {
-				printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n",
-					tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0",
-					tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half");
-				printk("TLAN: Partner capability: ");
-					for (i = 5; i <= 10; i++)
-						if (partner & (1<<i))
-							printk("%s",media[i-5]);
+				pr_info("Autonegotiation enabled, at 10%sMbps %s-Duplex\n",
+					tlphy_par & TLAN_PHY_SPEED_100
+					? "" : "0",
+					tlphy_par & TLAN_PHY_DUPLEX_FULL
+					? "Full" : "Half");
+				pr_info("TLAN: Partner capability: ");
+				for (i = 5; i <= 10; i++)
+					if (partner & (1<<i))
+						printk("%s", media[i-5]);
 				printk("\n");
 			}
 
-			TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+			tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+					TLAN_LED_LINK);
 #ifdef MONITOR
 			/* We have link beat..for now anyway */
-	        	priv->link = 1;
-	        	/*Enabling link beat monitoring */
-			TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_LINK_BEAT );
+			priv->link = 1;
+			/*Enabling link beat monitoring */
+			tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
 #endif
 		} else if (status & MII_GS_LINK)  {
-			printk( "TLAN: %s: Link active\n", dev->name );
-			TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK );
+			pr_info("TLAN: %s: Link active\n", dev->name);
+			tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
+					TLAN_LED_LINK);
 		}
 	}
 
-	if ( priv->phyNum == 0 ) {
-        	TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl );
-        	tlphy_ctl |= TLAN_TC_INTEN;
-        	TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl );
-        	sio = TLan_DioRead8( dev->base_addr, TLAN_NET_SIO );
-        	sio |= TLAN_NET_SIO_MINTEN;
-        	TLan_DioWrite8( dev->base_addr, TLAN_NET_SIO, sio );
+	if (priv->phy_num == 0) {
+		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+		tlphy_ctl |= TLAN_TC_INTEN;
+		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+		sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
+		sio |= TLAN_NET_SIO_MINTEN;
+		tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
 	}
 
-	if ( status & MII_GS_LINK ) {
-		TLan_SetMac( dev, 0, dev->dev_addr );
-		priv->phyOnline = 1;
-		outb( ( TLAN_HC_INT_ON >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
-		if ( debug >= 1 && debug != TLAN_DEBUG_PROBE ) {
-			outb( ( TLAN_HC_REQ_INT >> 8 ), dev->base_addr + TLAN_HOST_CMD + 1 );
-		}
-		outl( priv->rxListDMA, dev->base_addr + TLAN_CH_PARM );
-		outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD );
+	if (status & MII_GS_LINK) {
+		tlan_set_mac(dev, 0, dev->dev_addr);
+		priv->phy_online = 1;
+		outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
+		if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
+			outb((TLAN_HC_REQ_INT >> 8),
+			     dev->base_addr + TLAN_HOST_CMD + 1);
+		outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
+		outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
 		netif_carrier_on(dev);
 	} else {
-		printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n",
-			dev->name );
-		TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET );
+		pr_info("TLAN: %s: Link inactive, will retry in 10 secs...\n",
+		       dev->name);
+		tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
 		return;
 	}
-	TLan_SetMulticastList(dev);
+	tlan_set_multicast_list(dev);
 
-} /* TLan_FinishReset */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_SetMac
-	 *
-	 *	Returns:
-	 *		Nothing
-	 *	Parms:
-	 *		dev	Pointer to device structure of adapter
-	 *			on which to change the AREG.
-	 *		areg	The AREG to set the address in (0 - 3).
-	 *		mac	A pointer to an array of chars.  Each
-	 *			element stores one byte of the address.
-	 *			IE, it isn't in ascii.
-	 *
-	 *	This function transfers a MAC address to one of the
-	 *	TLAN AREGs (address registers).  The TLAN chip locks
-	 *	the register on writing to offset 0 and unlocks the
-	 *	register after writing to offset 5.  If NULL is passed
-	 *	in mac, then the AREG is filled with 0's.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_set_mac
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		dev	Pointer to device structure of adapter
+ *			on which to change the AREG.
+ *		areg	The AREG to set the address in (0 - 3).
+ *		mac	A pointer to an array of chars.  Each
+ *			element stores one byte of the address.
+ *			i.e., it isn't in ASCII.
+ *
+ *	This function transfers a MAC address to one of the
+ *	TLAN AREGs (address registers).  The TLAN chip locks
+ *	the register on writing to offset 0 and unlocks the
+ *	register after writing to offset 5.  If NULL is passed
+ *	in mac, then the AREG is filled with 0's.
+ *
+ **************************************************************/
 
-static void TLan_SetMac( struct net_device *dev, int areg, char *mac )
+static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
 {
 	int i;
 
 	areg *= 6;
 
-	if ( mac != NULL ) {
-		for ( i = 0; i < 6; i++ )
-			TLan_DioWrite8( dev->base_addr,
-					TLAN_AREG_0 + areg + i, mac[i] );
+	if (mac != NULL) {
+		for (i = 0; i < 6; i++)
+			tlan_dio_write8(dev->base_addr,
+					TLAN_AREG_0 + areg + i, mac[i]);
 	} else {
-		for ( i = 0; i < 6; i++ )
-			TLan_DioWrite8( dev->base_addr,
-					TLAN_AREG_0 + areg + i, 0 );
+		for (i = 0; i < 6; i++)
+			tlan_dio_write8(dev->base_addr,
+					TLAN_AREG_0 + areg + i, 0);
 	}
 
-} /* TLan_SetMac */
+}
 
 
 
@@ -2432,205 +2524,202 @@
 /*****************************************************************************
 ******************************************************************************
 
-	ThunderLAN Driver PHY Layer Routines
+ThunderLAN driver PHY layer routines
 
 ******************************************************************************
 *****************************************************************************/
 
 
 
-	/*********************************************************************
-	 *	TLan_PhyPrint
-	 *
-	 *	Returns:
-	 *		Nothing
-	 *	Parms:
-	 *		dev	A pointer to the device structure of the
-	 *			TLAN device having the PHYs to be detailed.
-	 *
-	 *	This function prints the registers a PHY (aka transceiver).
-	 *
-	 ********************************************************************/
+/*********************************************************************
+ *	tlan_phy_print
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		dev	A pointer to the device structure of the
+ *			TLAN device having the PHYs to be detailed.
+ *
+ *	This function prints the registers of a PHY (aka transceiver).
+ *
+ ********************************************************************/
 
-static void TLan_PhyPrint( struct net_device *dev )
+static void tlan_phy_print(struct net_device *dev)
 {
-	TLanPrivateInfo *priv = netdev_priv(dev);
+	struct tlan_priv *priv = netdev_priv(dev);
 	u16 i, data0, data1, data2, data3, phy;
 
-	phy = priv->phy[priv->phyNum];
+	phy = priv->phy[priv->phy_num];
 
-	if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
-		printk( "TLAN:   Device %s, Unmanaged PHY.\n", dev->name );
-	} else if ( phy <= TLAN_PHY_MAX_ADDR ) {
-		printk( "TLAN:   Device %s, PHY 0x%02x.\n", dev->name, phy );
-		printk( "TLAN:      Off.  +0     +1     +2     +3\n" );
-                for ( i = 0; i < 0x20; i+= 4 ) {
-			printk( "TLAN:      0x%02x", i );
-			TLan_MiiReadReg( dev, phy, i, &data0 );
-			printk( " 0x%04hx", data0 );
-			TLan_MiiReadReg( dev, phy, i + 1, &data1 );
-			printk( " 0x%04hx", data1 );
-			TLan_MiiReadReg( dev, phy, i + 2, &data2 );
-			printk( " 0x%04hx", data2 );
-			TLan_MiiReadReg( dev, phy, i + 3, &data3 );
-			printk( " 0x%04hx\n", data3 );
+	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+		pr_info("TLAN:   Device %s, Unmanaged PHY.\n", dev->name);
+	} else if (phy <= TLAN_PHY_MAX_ADDR) {
+		pr_info("TLAN:   Device %s, PHY 0x%02x.\n", dev->name, phy);
+		pr_info("TLAN:      Off.  +0     +1     +2     +3\n");
+		for (i = 0; i < 0x20; i += 4) {
+			pr_info("TLAN:      0x%02x", i);
+			tlan_mii_read_reg(dev, phy, i, &data0);
+			printk(" 0x%04hx", data0);
+			tlan_mii_read_reg(dev, phy, i + 1, &data1);
+			printk(" 0x%04hx", data1);
+			tlan_mii_read_reg(dev, phy, i + 2, &data2);
+			printk(" 0x%04hx", data2);
+			tlan_mii_read_reg(dev, phy, i + 3, &data3);
+			printk(" 0x%04hx\n", data3);
 		}
 	} else {
-		printk( "TLAN:   Device %s, Invalid PHY.\n", dev->name );
+		pr_info("TLAN:   Device %s, Invalid PHY.\n", dev->name);
 	}
 
-} /* TLan_PhyPrint */
+}
 
 
 
 
-	/*********************************************************************
-	 *	TLan_PhyDetect
-	 *
-	 *	Returns:
-	 *		Nothing
-	 *	Parms:
-	 *		dev	A pointer to the device structure of the adapter
-	 *			for which the PHY needs determined.
-	 *
-	 *	So far I've found that adapters which have external PHYs
-	 *	may also use the internal PHY for part of the functionality.
-	 *	(eg, AUI/Thinnet).  This function finds out if this TLAN
-	 *	chip has an internal PHY, and then finds the first external
-	 *	PHY (starting from address 0) if it exists).
-	 *
-	 ********************************************************************/
+/*********************************************************************
+ *	tlan_phy_detect
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		dev	A pointer to the device structure of the adapter
+ *			for which the PHY needs to be determined.
+ *
+ *	So far I've found that adapters which have external PHYs
+ *	may also use the internal PHY for part of the functionality.
+ *	(e.g. AUI/Thinnet).  This function finds out if this TLAN
+ *	chip has an internal PHY, and then finds the first external
+ *	PHY (starting from address 0), if it exists.
+ *
+ ********************************************************************/
 
-static void TLan_PhyDetect( struct net_device *dev )
+static void tlan_phy_detect(struct net_device *dev)
 {
-	TLanPrivateInfo *priv = netdev_priv(dev);
+	struct tlan_priv *priv = netdev_priv(dev);
 	u16		control;
 	u16		hi;
 	u16		lo;
 	u32		phy;
 
-	if ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) {
-		priv->phyNum = 0xFFFF;
+	if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+		priv->phy_num = 0xffff;
 		return;
 	}
 
-	TLan_MiiReadReg( dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi );
+	tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);
 
-	if ( hi != 0xFFFF ) {
+	if (hi != 0xffff)
 		priv->phy[0] = TLAN_PHY_MAX_ADDR;
-	} else {
+	else
 		priv->phy[0] = TLAN_PHY_NONE;
-	}
 
 	priv->phy[1] = TLAN_PHY_NONE;
-	for ( phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++ ) {
-		TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control );
-		TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi );
-		TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &lo );
-		if ( ( control != 0xFFFF ) ||
-		     ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) {
-			TLAN_DBG( TLAN_DEBUG_GNRL,
-				  "PHY found at %02x %04x %04x %04x\n",
-				  phy, control, hi, lo );
-			if ( ( priv->phy[1] == TLAN_PHY_NONE ) &&
-			     ( phy != TLAN_PHY_MAX_ADDR ) ) {
+	for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
+		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
+		tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
+		tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
+		if ((control != 0xffff) ||
+		    (hi != 0xffff) || (lo != 0xffff)) {
+			TLAN_DBG(TLAN_DEBUG_GNRL,
+				 "PHY found at %02x %04x %04x %04x\n",
+				 phy, control, hi, lo);
+			if ((priv->phy[1] == TLAN_PHY_NONE) &&
+			    (phy != TLAN_PHY_MAX_ADDR)) {
 				priv->phy[1] = phy;
 			}
 		}
 	}
 
-	if ( priv->phy[1] != TLAN_PHY_NONE ) {
-		priv->phyNum = 1;
-	} else if ( priv->phy[0] != TLAN_PHY_NONE ) {
-		priv->phyNum = 0;
-	} else {
-		printk( "TLAN:  Cannot initialize device, no PHY was found!\n" );
-	}
+	if (priv->phy[1] != TLAN_PHY_NONE)
+		priv->phy_num = 1;
+	else if (priv->phy[0] != TLAN_PHY_NONE)
+		priv->phy_num = 0;
+	else
+		pr_info("TLAN:  Cannot initialize device, no PHY was found!\n");
 
-} /* TLan_PhyDetect */
+}
 
 
 
 
-static void TLan_PhyPowerDown( struct net_device *dev )
+static void tlan_phy_power_down(struct net_device *dev)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	u16		value;
 
-	TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name );
+	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
 	value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
-	TLan_MiiSync( dev->base_addr );
-	TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
-	if ( ( priv->phyNum == 0 ) &&
-	     ( priv->phy[1] != TLAN_PHY_NONE ) &&
-	     ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) {
-		TLan_MiiSync( dev->base_addr );
-		TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value );
+	tlan_mii_sync(dev->base_addr);
+	tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+	if ((priv->phy_num == 0) &&
+	    (priv->phy[1] != TLAN_PHY_NONE) &&
+	    (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
+		tlan_mii_sync(dev->base_addr);
+		tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
 	}
 
 	/* Wait for 50 ms and powerup
 	 * This is abitrary.  It is intended to make sure the
 	 * transceiver settles.
 	 */
-	TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_PUP );
+	tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP);
 
-} /* TLan_PhyPowerDown */
+}
 
 
 
 
-static void TLan_PhyPowerUp( struct net_device *dev )
+static void tlan_phy_power_up(struct net_device *dev)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	u16		value;
 
-	TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name );
-	TLan_MiiSync( dev->base_addr );
+	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
+	tlan_mii_sync(dev->base_addr);
 	value = MII_GC_LOOPBK;
-	TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value );
-	TLan_MiiSync(dev->base_addr);
+	tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+	tlan_mii_sync(dev->base_addr);
 	/* Wait for 500 ms and reset the
 	 * transceiver.  The TLAN docs say both 50 ms and
 	 * 500 ms, so do the longer, just in case.
 	 */
-	TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_RESET );
+	tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET);
 
-} /* TLan_PhyPowerUp */
+}
 
 
 
 
-static void TLan_PhyReset( struct net_device *dev )
+static void tlan_phy_reset(struct net_device *dev)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	u16		phy;
 	u16		value;
 
-	phy = priv->phy[priv->phyNum];
+	phy = priv->phy[priv->phy_num];
 
-	TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name );
-	TLan_MiiSync( dev->base_addr );
+	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
+	tlan_mii_sync(dev->base_addr);
 	value = MII_GC_LOOPBK | MII_GC_RESET;
-	TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, value );
-	TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
-	while ( value & MII_GC_RESET ) {
-		TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &value );
-	}
+	tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
+	tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
+	while (value & MII_GC_RESET)
+		tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
 
 	/* Wait for 500 ms and initialize.
 	 * I don't remember why I wait this long.
 	 * I've changed this to 50ms, as it seems long enough.
 	 */
-	TLan_SetTimer( dev, (HZ/20), TLAN_TIMER_PHY_START_LINK );
+	tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK);
 
-} /* TLan_PhyReset */
+}
 
 
 
 
-static void TLan_PhyStartLink( struct net_device *dev )
+static void tlan_phy_start_link(struct net_device *dev)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	u16		ability;
 	u16		control;
 	u16		data;
@@ -2638,86 +2727,88 @@
 	u16		status;
 	u16		tctl;
 
-	phy = priv->phy[priv->phyNum];
-	TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name );
-	TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
-	TLan_MiiReadReg( dev, phy, MII_GEN_STS, &ability );
+	phy = priv->phy[priv->phy_num];
+	TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
+	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);
 
-	if ( ( status & MII_GS_AUTONEG ) &&
-	     ( ! priv->aui ) ) {
+	if ((status & MII_GS_AUTONEG) &&
+	    (!priv->aui)) {
 		ability = status >> 11;
-		if ( priv->speed  == TLAN_SPEED_10 &&
-		     priv->duplex == TLAN_DUPLEX_HALF) {
-			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000);
-		} else if ( priv->speed == TLAN_SPEED_10 &&
-			    priv->duplex == TLAN_DUPLEX_FULL) {
-			priv->tlanFullDuplex = true;
-			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100);
-		} else if ( priv->speed == TLAN_SPEED_100 &&
-			    priv->duplex == TLAN_DUPLEX_HALF) {
-			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000);
-		} else if ( priv->speed == TLAN_SPEED_100 &&
-			    priv->duplex == TLAN_DUPLEX_FULL) {
-			priv->tlanFullDuplex = true;
-			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100);
+		if (priv->speed  == TLAN_SPEED_10 &&
+		    priv->duplex == TLAN_DUPLEX_HALF) {
+			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
+		} else if (priv->speed == TLAN_SPEED_10 &&
+			   priv->duplex == TLAN_DUPLEX_FULL) {
+			priv->tlan_full_duplex = true;
+			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
+		} else if (priv->speed == TLAN_SPEED_100 &&
+			   priv->duplex == TLAN_DUPLEX_HALF) {
+			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
+		} else if (priv->speed == TLAN_SPEED_100 &&
+			   priv->duplex == TLAN_DUPLEX_FULL) {
+			priv->tlan_full_duplex = true;
+			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
 		} else {
 
 			/* Set Auto-Neg advertisement */
-			TLan_MiiWriteReg( dev, phy, MII_AN_ADV, (ability << 5) | 1);
+			tlan_mii_write_reg(dev, phy, MII_AN_ADV,
+					   (ability << 5) | 1);
 			/* Enablee Auto-Neg */
-			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1000 );
+			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
 			/* Restart Auto-Neg */
-			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x1200 );
+			tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
 			/* Wait for 4 sec for autonegotiation
-		 	* to complete.  The max spec time is less than this
-		 	* but the card need additional time to start AN.
-		 	* .5 sec should be plenty extra.
-		 	*/
-			printk( "TLAN: %s: Starting autonegotiation.\n", dev->name );
-			TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN );
+			 * to complete.  The max spec time is less than this
+			 * but the card needs additional time to start AN.
+			 * .5 sec should be plenty extra.
+			 */
+			pr_info("TLAN: %s: Starting autonegotiation.\n",
+				dev->name);
+			tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
 			return;
 		}
 
 	}
 
-	if ( ( priv->aui ) && ( priv->phyNum != 0 ) ) {
-		priv->phyNum = 0;
-		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
-		TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
-		TLan_SetTimer( dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN );
+	if ((priv->aui) && (priv->phy_num != 0)) {
+		priv->phy_num = 0;
+		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+			| TLAN_NET_CFG_PHY_EN;
+		tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+		tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN);
 		return;
-	}  else if ( priv->phyNum == 0 ) {
+	} else if (priv->phy_num == 0) {
 		control = 0;
-        	TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tctl );
-		if ( priv->aui ) {
-                	tctl |= TLAN_TC_AUISEL;
+		tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
+		if (priv->aui) {
+			tctl |= TLAN_TC_AUISEL;
 		} else {
-                	tctl &= ~TLAN_TC_AUISEL;
-			if ( priv->duplex == TLAN_DUPLEX_FULL ) {
+			tctl &= ~TLAN_TC_AUISEL;
+			if (priv->duplex == TLAN_DUPLEX_FULL) {
 				control |= MII_GC_DUPLEX;
-				priv->tlanFullDuplex = true;
+				priv->tlan_full_duplex = true;
 			}
-			if ( priv->speed == TLAN_SPEED_100 ) {
+			if (priv->speed == TLAN_SPEED_100)
 				control |= MII_GC_SPEEDSEL;
-			}
 		}
-		TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, control );
-        	TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tctl );
+		tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
+		tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
 	}
 
 	/* Wait for 2 sec to give the transceiver time
 	 * to establish link.
 	 */
-	TLan_SetTimer( dev, (4*HZ), TLAN_TIMER_FINISH_RESET );
+	tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
 
-} /* TLan_PhyStartLink */
+}
 
 
 
 
-static void TLan_PhyFinishAutoNeg( struct net_device *dev )
+static void tlan_phy_finish_auto_neg(struct net_device *dev)
 {
-	TLanPrivateInfo	*priv = netdev_priv(dev);
+	struct tlan_priv	*priv = netdev_priv(dev);
 	u16		an_adv;
 	u16		an_lpa;
 	u16		data;
@@ -2725,115 +2816,118 @@
 	u16		phy;
 	u16		status;
 
-	phy = priv->phy[priv->phyNum];
+	phy = priv->phy[priv->phy_num];
 
-	TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
-	udelay( 1000 );
-	TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status );
+	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+	udelay(1000);
+	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
 
-	if ( ! ( status & MII_GS_AUTOCMPLT ) ) {
+	if (!(status & MII_GS_AUTOCMPLT)) {
 		/* Wait for 8 sec to give the process
 		 * more time.  Perhaps we should fail after a while.
 		 */
-		 if (!priv->neg_be_verbose++) {
-			 pr_info("TLAN:  Giving autonegotiation more time.\n");
-		 	 pr_info("TLAN:  Please check that your adapter has\n");
-		 	 pr_info("TLAN:  been properly connected to a HUB or Switch.\n");
-			 pr_info("TLAN:  Trying to establish link in the background...\n");
-		 }
-		TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN );
+		if (!priv->neg_be_verbose++) {
+			pr_info("TLAN:  Giving autonegotiation more time.\n");
+			pr_info("TLAN:  Please check that your adapter has\n");
+			pr_info("TLAN:  been properly connected to a HUB or Switch.\n");
+			pr_info("TLAN:  Trying to establish link in the background...\n");
+		}
+		tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
 		return;
 	}
 
-	printk( "TLAN: %s: Autonegotiation complete.\n", dev->name );
-	TLan_MiiReadReg( dev, phy, MII_AN_ADV, &an_adv );
-	TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
+	pr_info("TLAN: %s: Autonegotiation complete.\n", dev->name);
+	tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
+	tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
 	mode = an_adv & an_lpa & 0x03E0;
-	if ( mode & 0x0100 ) {
-		priv->tlanFullDuplex = true;
-	} else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
-		priv->tlanFullDuplex = true;
-	}
+	if (mode & 0x0100)
+		priv->tlan_full_duplex = true;
+	else if (!(mode & 0x0080) && (mode & 0x0040))
+		priv->tlan_full_duplex = true;
 
-	if ( ( ! ( mode & 0x0180 ) ) &&
-	     ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) &&
-	     ( priv->phyNum != 0 ) ) {
-		priv->phyNum = 0;
-		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
-		TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data );
-		TLan_SetTimer( dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN );
+	if ((!(mode & 0x0180)) &&
+	    (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
+	    (priv->phy_num != 0)) {
+		priv->phy_num = 0;
+		data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+			| TLAN_NET_CFG_PHY_EN;
+		tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+		tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
 		return;
 	}
 
-	if ( priv->phyNum == 0 ) {
-		if ( ( priv->duplex == TLAN_DUPLEX_FULL ) ||
-		     ( an_adv & an_lpa & 0x0040 ) ) {
-			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL,
-					  MII_GC_AUTOENB | MII_GC_DUPLEX );
-			pr_info("TLAN:  Starting internal PHY with FULL-DUPLEX\n" );
+	if (priv->phy_num == 0) {
+		if ((priv->duplex == TLAN_DUPLEX_FULL) ||
+		    (an_adv & an_lpa & 0x0040)) {
+			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+					   MII_GC_AUTOENB | MII_GC_DUPLEX);
+			pr_info("TLAN:  Starting internal PHY with FULL-DUPLEX\n");
 		} else {
-			TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB );
-			pr_info( "TLAN:  Starting internal PHY with HALF-DUPLEX\n" );
+			tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+					   MII_GC_AUTOENB);
+			pr_info("TLAN:  Starting internal PHY with HALF-DUPLEX\n");
 		}
 	}
 
 	/* Wait for 100 ms.  No reason in partiticular.
 	 */
-	TLan_SetTimer( dev, (HZ/10), TLAN_TIMER_FINISH_RESET );
+	tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET);
 
-} /* TLan_PhyFinishAutoNeg */
+}
 
 #ifdef MONITOR
 
-        /*********************************************************************
-        *
-        *      TLan_phyMonitor
-        *
-        *      Returns:
-        *              None
-        *
-        *      Params:
-        *              dev             The device structure of this device.
-        *
-        *
-        *      This function monitors PHY condition by reading the status
-        *      register via the MII bus. This can be used to give info
-        *      about link changes (up/down), and possible switch to alternate
-        *      media.
-        *
-        * ******************************************************************/
+/*********************************************************************
+ *
+ *     tlan_phy_monitor
+ *
+ *     Returns:
+ *	      None
+ *
+ *     Params:
+ *	      dev	     The device structure of this device.
+ *
+ *
+ *     This function monitors PHY condition by reading the status
+ *     register via the MII bus. This can be used to give info
+ *     about link changes (up/down), and possible switch to alternate
+ *     media.
+ *
+ *******************************************************************/
 
-void TLan_PhyMonitor( struct net_device *dev )
+void tlan_phy_monitor(struct net_device *dev)
 {
-	TLanPrivateInfo *priv = netdev_priv(dev);
+	struct tlan_priv *priv = netdev_priv(dev);
 	u16     phy;
 	u16     phy_status;
 
-	phy = priv->phy[priv->phyNum];
+	phy = priv->phy[priv->phy_num];
 
-        /* Get PHY status register */
-        TLan_MiiReadReg( dev, phy, MII_GEN_STS, &phy_status );
+	/* Get PHY status register */
+	tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);
 
-        /* Check if link has been lost */
-        if (!(phy_status & MII_GS_LINK)) {
- 	       if (priv->link) {
-		      priv->link = 0;
-	              printk(KERN_DEBUG "TLAN: %s has lost link\n", dev->name);
-		      netif_carrier_off(dev);
-		      TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
-		      return;
+	/* Check if link has been lost */
+	if (!(phy_status & MII_GS_LINK)) {
+		if (priv->link) {
+			priv->link = 0;
+			printk(KERN_DEBUG "TLAN: %s has lost link\n",
+			       dev->name);
+			netif_carrier_off(dev);
+			tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
+			return;
 		}
 	}
 
-        /* Link restablished? */
-        if ((phy_status & MII_GS_LINK) && !priv->link) {
- 		priv->link = 1;
-        	printk(KERN_DEBUG "TLAN: %s has reestablished link\n", dev->name);
+	/* Link reestablished? */
+	if ((phy_status & MII_GS_LINK) && !priv->link) {
+		priv->link = 1;
+		printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
+		       dev->name);
 		netif_carrier_on(dev);
-        }
+	}
 
 	/* Setup a new monitor */
-	TLan_SetTimer( dev, (2*HZ), TLAN_TIMER_LINK_BEAT );
+	tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
 }
 
 #endif /* MONITOR */
@@ -2842,47 +2936,48 @@
 /*****************************************************************************
 ******************************************************************************
 
-	ThunderLAN Driver MII Routines
+ThunderLAN driver MII routines
 
-	These routines are based on the information in Chap. 2 of the
-	"ThunderLAN Programmer's Guide", pp. 15-24.
+These routines are based on the information in Chap. 2 of the
+"ThunderLAN Programmer's Guide", pp. 15-24.
 
 ******************************************************************************
 *****************************************************************************/
 
 
-	/***************************************************************
-	 *	TLan_MiiReadReg
-	 *
-	 *	Returns:
-	 *		false	if ack received ok
-	 *		true	if no ack received or other error
-	 *
-	 *	Parms:
-	 *		dev		The device structure containing
-	 *				The io address and interrupt count
-	 *				for this device.
-	 *		phy		The address of the PHY to be queried.
-	 *		reg		The register whose contents are to be
-	 *				retrieved.
-	 *		val		A pointer to a variable to store the
-	 *				retrieved value.
-	 *
-	 *	This function uses the TLAN's MII bus to retrieve the contents
-	 *	of a given register on a PHY.  It sends the appropriate info
-	 *	and then reads the 16-bit register value from the MII bus via
-	 *	the TLAN SIO register.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_mii_read_reg
+ *
+ *	Returns:
+ *		false	if ack received ok
+ *		true	if no ack received or other error
+ *
+ *	Parms:
+ *		dev		The device structure containing
+ *				the io address and interrupt count
+ *				for this device.
+ *		phy		The address of the PHY to be queried.
+ *		reg		The register whose contents are to be
+ *				retrieved.
+ *		val		A pointer to a variable to store the
+ *				retrieved value.
+ *
+ *	This function uses the TLAN's MII bus to retrieve the contents
+ *	of a given register on a PHY.  It sends the appropriate info
+ *	and then reads the 16-bit register value from the MII bus via
+ *	the TLAN SIO register.
+ *
+ **************************************************************/
 
-static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
+static bool
+tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
 {
 	u8	nack;
 	u16	sio, tmp;
- 	u32	i;
+	u32	i;
 	bool	err;
 	int	minten;
-	TLanPrivateInfo *priv = netdev_priv(dev);
+	struct tlan_priv *priv = netdev_priv(dev);
 	unsigned long flags = 0;
 
 	err = false;
@@ -2892,48 +2987,48 @@
 	if (!in_irq())
 		spin_lock_irqsave(&priv->lock, flags);
 
-	TLan_MiiSync(dev->base_addr);
+	tlan_mii_sync(dev->base_addr);
 
-	minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
-	if ( minten )
-		TLan_ClearBit(TLAN_NET_SIO_MINTEN, sio);
+	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+	if (minten)
+		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
 
-	TLan_MiiSendData( dev->base_addr, 0x1, 2 );	/* Start ( 01b ) */
-	TLan_MiiSendData( dev->base_addr, 0x2, 2 );	/* Read  ( 10b ) */
-	TLan_MiiSendData( dev->base_addr, phy, 5 );	/* Device #      */
-	TLan_MiiSendData( dev->base_addr, reg, 5 );	/* Register #    */
+	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* start (01b) */
+	tlan_mii_send_data(dev->base_addr, 0x2, 2);	/* read  (10b) */
+	tlan_mii_send_data(dev->base_addr, phy, 5);	/* device #      */
+	tlan_mii_send_data(dev->base_addr, reg, 5);	/* register #    */
 
 
-	TLan_ClearBit(TLAN_NET_SIO_MTXEN, sio);		/* Change direction */
+	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);	/* change direction */
 
-	TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);		/* Clock Idle bit */
-	TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
-	TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);		/* Wait 300ns */
+	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* clock idle bit */
+	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* wait 300ns */
 
-	nack = TLan_GetBit(TLAN_NET_SIO_MDATA, sio);	/* Check for ACK */
-	TLan_SetBit(TLAN_NET_SIO_MCLK, sio);		/* Finish ACK */
-	if (nack) {					/* No ACK, so fake it */
+	nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio);	/* check for ACK */
+	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);		/* finish ACK */
+	if (nack) {					/* no ACK, so fake it */
 		for (i = 0; i < 16; i++) {
-			TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
-			TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
 		}
 		tmp = 0xffff;
 		err = true;
 	} else {					/* ACK, so read data */
 		for (tmp = 0, i = 0x8000; i; i >>= 1) {
-			TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
-			if (TLan_GetBit(TLAN_NET_SIO_MDATA, sio))
+			tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+			if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
 				tmp |= i;
-			TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+			tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
 		}
 	}
 
 
-	TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);		/* Idle cycle */
-	TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
+	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);		/* idle cycle */
+	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
 
-	if ( minten )
-		TLan_SetBit(TLAN_NET_SIO_MINTEN, sio);
+	if (minten)
+		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
 
 	*val = tmp;
 
@@ -2942,116 +3037,117 @@
 
 	return err;
 
-} /* TLan_MiiReadReg */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_MiiSendData
-	 *
-	 *	Returns:
-	 *		Nothing
-	 *	Parms:
-	 *		base_port	The base IO port of the adapter	in
-	 *				question.
-	 *		dev		The address of the PHY to be queried.
-	 *		data		The value to be placed on the MII bus.
-	 *		num_bits	The number of bits in data that are to
-	 *				be placed on the MII bus.
-	 *
-	 *	This function sends on sequence of bits on the MII
-	 *	configuration bus.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_mii_send_data
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		base_port	The base IO port of the adapter in
+ *				question.
+ *		data		The value to be placed on the MII
+ *				bus.
+ *		num_bits	The number of bits in data that are to
+ *				be placed on the MII bus.
+ *
+ *	This function sends one sequence of bits on the MII
+ *	configuration bus.
+ *
+ **************************************************************/
 
-static void TLan_MiiSendData( u16 base_port, u32 data, unsigned num_bits )
+static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
 {
 	u16 sio;
 	u32 i;
 
-	if ( num_bits == 0 )
+	if (num_bits == 0)
 		return;
 
-	outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
 	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
-	TLan_SetBit( TLAN_NET_SIO_MTXEN, sio );
+	tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);
 
-	for ( i = ( 0x1 << ( num_bits - 1 ) ); i; i >>= 1 ) {
-		TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
-		(void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
-		if ( data & i )
-			TLan_SetBit( TLAN_NET_SIO_MDATA, sio );
+	for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
+		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
+		if (data & i)
+			tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
 		else
-			TLan_ClearBit( TLAN_NET_SIO_MDATA, sio );
-		TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
-		(void) TLan_GetBit( TLAN_NET_SIO_MCLK, sio );
+			tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
+		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+		(void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
 	}
 
-} /* TLan_MiiSendData */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_MiiSync
-	 *
-	 *	Returns:
-	 *		Nothing
-	 *	Parms:
-	 *		base_port	The base IO port of the adapter in
-	 *				question.
-	 *
-	 *	This functions syncs all PHYs in terms of the MII configuration
-	 *	bus.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_mii_sync
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		base_port	The base IO port of the adapter in
+ *				question.
+ *
+ *	This function syncs all PHYs in terms of the MII configuration
+ *	bus.
+ *
+ **************************************************************/
 
-static void TLan_MiiSync( u16 base_port )
+static void tlan_mii_sync(u16 base_port)
 {
 	int i;
 	u16 sio;
 
-	outw( TLAN_NET_SIO, base_port + TLAN_DIO_ADR );
+	outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
 	sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
 
-	TLan_ClearBit( TLAN_NET_SIO_MTXEN, sio );
-	for ( i = 0; i < 32; i++ ) {
-		TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );
-		TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+	tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
+	for (i = 0; i < 32; i++) {
+		tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+		tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
 	}
 
-} /* TLan_MiiSync */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_MiiWriteReg
-	 *
-	 *	Returns:
-	 *		Nothing
-	 *	Parms:
-	 *		dev		The device structure for the device
-	 *				to write to.
-	 *		phy		The address of the PHY to be written to.
-	 *		reg		The register whose contents are to be
-	 *				written.
-	 *		val		The value to be written to the register.
-	 *
-	 *	This function uses the TLAN's MII bus to write the contents of a
-	 *	given register on a PHY.  It sends the appropriate info and then
-	 *	writes the 16-bit register value from the MII configuration bus
-	 *	via the TLAN SIO register.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_mii_write_reg
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		dev		The device structure for the device
+ *				to write to.
+ *		phy		The address of the PHY to be written to.
+ *		reg		The register whose contents are to be
+ *				written.
+ *		val		The value to be written to the register.
+ *
+ *	This function uses the TLAN's MII bus to write the contents of a
+ *	given register on a PHY.  It sends the appropriate info and then
+ *	writes the 16-bit register value from the MII configuration bus
+ *	via the TLAN SIO register.
+ *
+ **************************************************************/
 
-static void TLan_MiiWriteReg( struct net_device *dev, u16 phy, u16 reg, u16 val )
+static void
+tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
 {
 	u16	sio;
 	int	minten;
 	unsigned long flags = 0;
-	TLanPrivateInfo *priv = netdev_priv(dev);
+	struct tlan_priv *priv = netdev_priv(dev);
 
 	outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
 	sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
@@ -3059,30 +3155,30 @@
 	if (!in_irq())
 		spin_lock_irqsave(&priv->lock, flags);
 
-	TLan_MiiSync( dev->base_addr );
+	tlan_mii_sync(dev->base_addr);
 
-	minten = TLan_GetBit( TLAN_NET_SIO_MINTEN, sio );
-	if ( minten )
-		TLan_ClearBit( TLAN_NET_SIO_MINTEN, sio );
+	minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+	if (minten)
+		tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
 
-	TLan_MiiSendData( dev->base_addr, 0x1, 2 );	/* Start ( 01b ) */
-	TLan_MiiSendData( dev->base_addr, 0x1, 2 );	/* Write ( 01b ) */
-	TLan_MiiSendData( dev->base_addr, phy, 5 );	/* Device #      */
-	TLan_MiiSendData( dev->base_addr, reg, 5 );	/* Register #    */
+	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* start (01b) */
+	tlan_mii_send_data(dev->base_addr, 0x1, 2);	/* write (01b) */
+	tlan_mii_send_data(dev->base_addr, phy, 5);	/* device #      */
+	tlan_mii_send_data(dev->base_addr, reg, 5);	/* register #    */
 
-	TLan_MiiSendData( dev->base_addr, 0x2, 2 );	/* Send ACK */
-	TLan_MiiSendData( dev->base_addr, val, 16 );	/* Send Data */
+	tlan_mii_send_data(dev->base_addr, 0x2, 2);	/* send ACK */
+	tlan_mii_send_data(dev->base_addr, val, 16);	/* send data */
 
-	TLan_ClearBit( TLAN_NET_SIO_MCLK, sio );	/* Idle cycle */
-	TLan_SetBit( TLAN_NET_SIO_MCLK, sio );
+	tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);	/* idle cycle */
+	tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
 
-	if ( minten )
-		TLan_SetBit( TLAN_NET_SIO_MINTEN, sio );
+	if (minten)
+		tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
 
 	if (!in_irq())
 		spin_unlock_irqrestore(&priv->lock, flags);
 
-} /* TLan_MiiWriteReg */
+}
 
 
 
@@ -3090,229 +3186,226 @@
 /*****************************************************************************
 ******************************************************************************
 
-	ThunderLAN Driver Eeprom routines
+ThunderLAN driver eeprom routines
 
-	The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
-	EEPROM.  These functions are based on information in Microchip's
-	data sheet.  I don't know how well this functions will work with
-	other EEPROMs.
+the Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
+EEPROM.  these functions are based on information in Microchip's
+data sheet.  I don't know how well these functions will work with
+other EEPROMs.
 
 ******************************************************************************
 *****************************************************************************/
 
 
-	/***************************************************************
-	 *	TLan_EeSendStart
-	 *
-	 *	Returns:
-	 *		Nothing
-	 *	Parms:
-	 *		io_base		The IO port base address for the
-	 *				TLAN device with the EEPROM to
-	 *				use.
-	 *
-	 *	This function sends a start cycle to an EEPROM attached
-	 *	to a TLAN chip.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_ee_send_start
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		io_base		The IO port base address for the
+ *				TLAN device with the EEPROM to
+ *				use.
+ *
+ *	This function sends a start cycle to an EEPROM attached
+ *	to a TLAN chip.
+ *
+ **************************************************************/
 
-static void TLan_EeSendStart( u16 io_base )
+static void tlan_ee_send_start(u16 io_base)
 {
 	u16	sio;
 
-	outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
 	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
 
-	TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-	TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
-	TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
-	TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
-	TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+	tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+	tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
 
-} /* TLan_EeSendStart */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_EeSendByte
-	 *
-	 *	Returns:
-	 *		If the correct ack was received, 0, otherwise 1
-	 *	Parms:	io_base		The IO port base address for the
-	 *				TLAN device with the EEPROM to
-	 *				use.
-	 *		data		The 8 bits of information to
-	 *				send to the EEPROM.
-	 *		stop		If TLAN_EEPROM_STOP is passed, a
-	 *				stop cycle is sent after the
-	 *				byte is sent after the ack is
-	 *				read.
-	 *
-	 *	This function sends a byte on the serial EEPROM line,
-	 *	driving the clock to send each bit. The function then
-	 *	reverses transmission direction and reads an acknowledge
-	 *	bit.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_ee_send_byte
+ *
+ *	Returns:
+ *		If the correct ack was received, 0, otherwise 1
+ *	Parms:	io_base		The IO port base address for the
+ *				TLAN device with the EEPROM to
+ *				use.
+ *		data		The 8 bits of information to
+ *				send to the EEPROM.
+ *		stop		If TLAN_EEPROM_STOP is passed, a
+ *				stop cycle is sent after the
+ *				byte has been sent and the ack
+ *				has been read.
+ *
+ *	This function sends a byte on the serial EEPROM line,
+ *	driving the clock to send each bit. The function then
+ *	reverses transmission direction and reads an acknowledge
+ *	bit.
+ *
+ **************************************************************/
 
-static int TLan_EeSendByte( u16 io_base, u8 data, int stop )
+static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
 {
 	int	err;
 	u8	place;
 	u16	sio;
 
-	outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
 	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
 
 	/* Assume clock is low, tx is enabled; */
-	for ( place = 0x80; place != 0; place >>= 1 ) {
-		if ( place & data )
-			TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+	for (place = 0x80; place != 0; place >>= 1) {
+		if (place & data)
+			tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
 		else
-			TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
-		TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-		TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+			tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
 	}
-	TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
-	TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-	err = TLan_GetBit( TLAN_NET_SIO_EDATA, sio );
-	TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
-	TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
+	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+	tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+	err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
+	tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
 
-	if ( ( ! err ) && stop ) {
+	if ((!err) && stop) {
 		/* STOP, raise data while clock is high */
-		TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
-		TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-		TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
 	}
 
 	return err;
 
-} /* TLan_EeSendByte */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_EeReceiveByte
-	 *
-	 *	Returns:
-	 *		Nothing
-	 *	Parms:
-	 *		io_base		The IO port base address for the
-	 *				TLAN device with the EEPROM to
-	 *				use.
-	 *		data		An address to a char to hold the
-	 *				data sent from the EEPROM.
-	 *		stop		If TLAN_EEPROM_STOP is passed, a
-	 *				stop cycle is sent after the
-	 *				byte is received, and no ack is
-	 *				sent.
-	 *
-	 *	This function receives 8 bits of data from the EEPROM
-	 *	over the serial link.  It then sends and ack bit, or no
-	 *	ack and a stop bit.  This function is used to retrieve
-	 *	data after the address of a byte in the EEPROM has been
-	 *	sent.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_ee_receive_byte
+ *
+ *	Returns:
+ *		Nothing
+ *	Parms:
+ *		io_base		The IO port base address for the
+ *				TLAN device with the EEPROM to
+ *				use.
+ *		data		An address to a char to hold the
+ *				data sent from the EEPROM.
+ *		stop		If TLAN_EEPROM_STOP is passed, a
+ *				stop cycle is sent after the
+ *				byte is received, and no ack is
+ *				sent.
+ *
+ *	This function receives 8 bits of data from the EEPROM
+ *	over the serial link.  It then sends an ack bit, or no
+ *	ack and a stop bit.  This function is used to retrieve
+ *	data after the address of a byte in the EEPROM has been
+ *	sent.
+ *
+ **************************************************************/
 
-static void TLan_EeReceiveByte( u16 io_base, u8 *data, int stop )
+static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
 {
 	u8  place;
 	u16 sio;
 
-	outw( TLAN_NET_SIO, io_base + TLAN_DIO_ADR );
+	outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
 	sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
 	*data = 0;
 
 	/* Assume clock is low, tx is enabled; */
-	TLan_ClearBit( TLAN_NET_SIO_ETXEN, sio );
-	for ( place = 0x80; place; place >>= 1 ) {
-		TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-		if ( TLan_GetBit( TLAN_NET_SIO_EDATA, sio ) )
+	tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+	for (place = 0x80; place; place >>= 1) {
+		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+		if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
 			*data |= place;
-		TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
 	}
 
-	TLan_SetBit( TLAN_NET_SIO_ETXEN, sio );
-	if ( ! stop ) {
-		TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );	/* Ack = 0 */
-		TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-		TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+	tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+	if (!stop) {
+		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */
+		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
 	} else {
-		TLan_SetBit( TLAN_NET_SIO_EDATA, sio );		/* No ack = 1 (?) */
-		TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-		TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio );
+		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);	/* no ack = 1 (?) */
+		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+		tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
 		/* STOP, raise data while clock is high */
-		TLan_ClearBit( TLAN_NET_SIO_EDATA, sio );
-		TLan_SetBit( TLAN_NET_SIO_ECLOK, sio );
-		TLan_SetBit( TLAN_NET_SIO_EDATA, sio );
+		tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+		tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+		tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
 	}
 
-} /* TLan_EeReceiveByte */
+}
 
 
 
 
-	/***************************************************************
-	 *	TLan_EeReadByte
-	 *
-	 *	Returns:
-	 *		No error = 0, else, the stage at which the error
-	 *		occurred.
-	 *	Parms:
-	 *		io_base		The IO port base address for the
-	 *				TLAN device with the EEPROM to
-	 *				use.
-	 *		ee_addr		The address of the byte in the
-	 *				EEPROM whose contents are to be
-	 *				retrieved.
-	 *		data		An address to a char to hold the
-	 *				data obtained from the EEPROM.
-	 *
-	 *	This function reads a byte of information from an byte
-	 *	cell in the EEPROM.
-	 *
-	 **************************************************************/
+/***************************************************************
+ *	tlan_ee_read_byte
+ *
+ *	Returns:
+ *		No error = 0, else, the stage at which the error
+ *		occurred.
+ *	Parms:
+ *		io_base		The IO port base address for the
+ *				TLAN device with the EEPROM to
+ *				use.
+ *		ee_addr		The address of the byte in the
+ *				EEPROM whose contents are to be
+ *				retrieved.
+ *		data		An address to a char to hold the
+ *				data obtained from the EEPROM.
+ *
+ *	This function reads a byte of information from a byte
+ *	cell in the EEPROM.
+ *
+ **************************************************************/
 
-static int TLan_EeReadByte( struct net_device *dev, u8 ee_addr, u8 *data )
+static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
 {
 	int err;
-	TLanPrivateInfo *priv = netdev_priv(dev);
+	struct tlan_priv *priv = netdev_priv(dev);
 	unsigned long flags = 0;
-	int ret=0;
+	int ret = 0;
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	TLan_EeSendStart( dev->base_addr );
-	err = TLan_EeSendByte( dev->base_addr, 0xA0, TLAN_EEPROM_ACK );
-	if (err)
-	{
-		ret=1;
+	tlan_ee_send_start(dev->base_addr);
+	err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
+	if (err) {
+		ret = 1;
 		goto fail;
 	}
-	err = TLan_EeSendByte( dev->base_addr, ee_addr, TLAN_EEPROM_ACK );
-	if (err)
-	{
-		ret=2;
+	err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
+	if (err) {
+		ret = 2;
 		goto fail;
 	}
-	TLan_EeSendStart( dev->base_addr );
-	err = TLan_EeSendByte( dev->base_addr, 0xA1, TLAN_EEPROM_ACK );
-	if (err)
-	{
-		ret=3;
+	tlan_ee_send_start(dev->base_addr);
+	err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
+	if (err) {
+		ret = 3;
 		goto fail;
 	}
-	TLan_EeReceiveByte( dev->base_addr, data, TLAN_EEPROM_STOP );
+	tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
 fail:
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return ret;
 
-} /* TLan_EeReadByte */
+}
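
As a quick orientation to the renamed helpers above, here is a minimal caller
sketch. It is not part of the patch: the EEPROM offset and the messages are
purely illustrative, and locking/error handling is elided.

	/* illustrative only: poll the PHY status and read one EEPROM byte */
	struct tlan_priv *priv = netdev_priv(dev);
	u16 phy_status;
	u8 ee_byte;

	/* tlan_mii_read_reg() returns false when the PHY acked the read */
	if (!tlan_mii_read_reg(dev, priv->phy[priv->phy_num],
			       MII_GEN_STS, &phy_status))
		printk(KERN_DEBUG "TLAN: %s PHY status 0x%04x\n",
		       dev->name, phy_status);

	/* tlan_ee_read_byte() returns 0 on success; 0x83 is a made-up offset */
	if (!tlan_ee_read_byte(dev, 0x83, &ee_byte))
		printk(KERN_DEBUG "TLAN: %s EEPROM[0x83] = 0x%02x\n",
		       dev->name, ee_byte);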
 
 
 
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 3315ced..5fc98a8 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -20,8 +20,8 @@
  ********************************************************************/
 
 
-#include <asm/io.h>
-#include <asm/types.h>
+#include <linux/io.h>
+#include <linux/types.h>
 #include <linux/netdevice.h>
 
 
@@ -40,8 +40,11 @@
 #define TLAN_IGNORE		0
 #define TLAN_RECORD		1
 
-#define TLAN_DBG(lvl, format, args...)	\
-	do { if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); } while(0)
+#define TLAN_DBG(lvl, format, args...)					\
+	do {								\
+		if (debug&lvl)						\
+			printk(KERN_DEBUG "TLAN: " format, ##args);	\
+	} while (0)
 
 #define TLAN_DEBUG_GNRL		0x0001
 #define TLAN_DEBUG_TX		0x0002
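
For reference, a hypothetical call site for the reflowed TLAN_DBG macro; it is
not part of the patch and assumes dev and skb are in scope. The message only
reaches the log when the matching bit is set in the module's debug mask.

	/* illustrative only: emitted when TLAN_DEBUG_TX is set in "debug" */
	TLAN_DBG(TLAN_DEBUG_TX, "%s: queuing tx packet, len=%d\n",
		 dev->name, (int)skb->len);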
@@ -50,7 +53,8 @@
 #define TLAN_DEBUG_PROBE	0x0010
 
 #define TX_TIMEOUT		(10*HZ)	 /* We need time for auto-neg */
-#define MAX_TLAN_BOARDS		8	 /* Max number of boards installed at a time */
+#define MAX_TLAN_BOARDS		8	 /* Max number of boards installed
+					    at a time */
 
 
 	/*****************************************************************
@@ -70,13 +74,13 @@
 #define PCI_DEVICE_ID_OLICOM_OC2326			0x0014
 #endif
 
-typedef struct tlan_adapter_entry {
-	u16	vendorId;
-	u16	deviceId;
-	char	*deviceLabel;
+struct tlan_adapter_entry {
+	u16	vendor_id;
+	u16	device_id;
+	char	*device_label;
 	u32	flags;
-	u16	addrOfs;
-} TLanAdapterEntry;
+	u16	addr_ofs;
+};
 
 #define TLAN_ADAPTER_NONE		0x00000000
 #define TLAN_ADAPTER_UNMANAGED_PHY	0x00000001
@@ -129,18 +133,18 @@
 #define TLAN_CSTAT_DP_PR	0x0100
 
 
-typedef struct tlan_buffer_ref_tag {
+struct tlan_buffer {
 	u32	count;
 	u32	address;
-} TLanBufferRef;
+};
 
 
-typedef struct tlan_list_tag {
+struct tlan_list {
 	u32		forward;
-	u16		cStat;
-	u16		frameSize;
-	TLanBufferRef	buffer[TLAN_BUFFERS_PER_LIST];
-} TLanList;
+	u16		c_stat;
+	u16		frame_size;
+	struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST];
+};
 
 
 typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
@@ -164,49 +168,49 @@
 	 *
 	 ****************************************************************/
 
-typedef struct tlan_private_tag {
-	struct net_device       *nextDevice;
-	struct pci_dev		*pciDev;
+struct tlan_priv {
+	struct net_device       *next_device;
+	struct pci_dev		*pci_dev;
 	struct net_device       *dev;
-	void			*dmaStorage;
-	dma_addr_t		dmaStorageDMA;
-	unsigned int		dmaSize;
-	u8			*padBuffer;
-	TLanList                *rxList;
-	dma_addr_t		rxListDMA;
-	u8			*rxBuffer;
-	dma_addr_t		rxBufferDMA;
-	u32                     rxHead;
-	u32                     rxTail;
-	u32			rxEocCount;
-	TLanList                *txList;
-	dma_addr_t		txListDMA;
-	u8			*txBuffer;
-	dma_addr_t		txBufferDMA;
-	u32                     txHead;
-	u32                     txInProgress;
-	u32                     txTail;
-	u32			txBusyCount;
-	u32                     phyOnline;
-	u32			timerSetAt;
-	u32			timerType;
+	void			*dma_storage;
+	dma_addr_t		dma_storage_dma;
+	unsigned int		dma_size;
+	u8			*pad_buffer;
+	struct tlan_list	*rx_list;
+	dma_addr_t		rx_list_dma;
+	u8			*rx_buffer;
+	dma_addr_t		rx_buffer_dma;
+	u32			rx_head;
+	u32			rx_tail;
+	u32			rx_eoc_count;
+	struct tlan_list	*tx_list;
+	dma_addr_t		tx_list_dma;
+	u8			*tx_buffer;
+	dma_addr_t		tx_buffer_dma;
+	u32			tx_head;
+	u32			tx_in_progress;
+	u32			tx_tail;
+	u32			tx_busy_count;
+	u32			phy_online;
+	u32			timer_set_at;
+	u32			timer_type;
 	struct timer_list	timer;
 	struct board		*adapter;
-	u32			adapterRev;
+	u32			adapter_rev;
 	u32			aui;
 	u32			debug;
 	u32			duplex;
 	u32			phy[2];
-	u32			phyNum;
+	u32			phy_num;
 	u32			speed;
-	u8			tlanRev;
-	u8			tlanFullDuplex;
+	u8			tlan_rev;
+	u8			tlan_full_duplex;
 	spinlock_t		lock;
 	u8			link;
 	u8			is_eisa;
 	struct work_struct			tlan_tqueue;
 	u8			neg_be_verbose;
-} TLanPrivateInfo;
+};
 
 
 
@@ -247,7 +251,7 @@
 	 ****************************************************************/
 
 #define TLAN_HOST_CMD			0x00
-#define 	TLAN_HC_GO		0x80000000
+#define	TLAN_HC_GO		0x80000000
 #define		TLAN_HC_STOP		0x40000000
 #define		TLAN_HC_ACK		0x20000000
 #define		TLAN_HC_CS_MASK		0x1FE00000
@@ -283,7 +287,7 @@
 #define		TLAN_NET_CMD_TRFRAM	0x02
 #define		TLAN_NET_CMD_TXPACE	0x01
 #define TLAN_NET_SIO			0x01
-#define 	TLAN_NET_SIO_MINTEN	0x80
+#define	TLAN_NET_SIO_MINTEN	0x80
 #define		TLAN_NET_SIO_ECLOK	0x40
 #define		TLAN_NET_SIO_ETXEN	0x20
 #define		TLAN_NET_SIO_EDATA	0x10
@@ -304,7 +308,7 @@
 #define		TLAN_NET_MASK_MASK4	0x10
 #define		TLAN_NET_MASK_RSRVD	0x0F
 #define TLAN_NET_CONFIG			0x04
-#define 	TLAN_NET_CFG_RCLK	0x8000
+#define	TLAN_NET_CFG_RCLK	0x8000
 #define		TLAN_NET_CFG_TCLK	0x4000
 #define		TLAN_NET_CFG_BIT	0x2000
 #define		TLAN_NET_CFG_RXCRC	0x1000
@@ -372,7 +376,7 @@
 /* Generic MII/PHY Registers */
 
 #define MII_GEN_CTL			0x00
-#define 	MII_GC_RESET		0x8000
+#define	MII_GC_RESET		0x8000
 #define		MII_GC_LOOPBK		0x4000
 #define		MII_GC_SPEEDSEL		0x2000
 #define		MII_GC_AUTOENB		0x1000
@@ -397,9 +401,9 @@
 #define		MII_GS_EXTCAP		0x0001
 #define MII_GEN_ID_HI			0x02
 #define MII_GEN_ID_LO			0x03
-#define 	MII_GIL_OUI		0xFC00
-#define 	MII_GIL_MODEL		0x03F0
-#define 	MII_GIL_REVISION	0x000F
+#define	MII_GIL_OUI		0xFC00
+#define	MII_GIL_MODEL		0x03F0
+#define	MII_GIL_REVISION	0x000F
 #define MII_AN_ADV			0x04
 #define MII_AN_LPA			0x05
 #define MII_AN_EXP			0x06
@@ -408,7 +412,7 @@
 
 #define TLAN_TLPHY_ID			0x10
 #define TLAN_TLPHY_CTL			0x11
-#define 	TLAN_TC_IGLINK		0x8000
+#define	TLAN_TC_IGLINK		0x8000
 #define		TLAN_TC_SWAPOL		0x4000
 #define		TLAN_TC_AUISEL		0x2000
 #define		TLAN_TC_SQEEN		0x1000
@@ -435,41 +439,41 @@
 #define LEVEL1_ID1			0x7810
 #define LEVEL1_ID2			0x0000
 
-#define CIRC_INC( a, b ) if ( ++a >= b ) a = 0
+#define CIRC_INC(a, b) if (++a >= b) a = 0
 
 /* Routines to access internal registers. */
 
-static inline u8 TLan_DioRead8(u16 base_addr, u16 internal_addr)
+static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr)
 {
 	outw(internal_addr, base_addr + TLAN_DIO_ADR);
 	return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
 
-} /* TLan_DioRead8 */
+}
 
 
 
 
-static inline u16 TLan_DioRead16(u16 base_addr, u16 internal_addr)
+static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr)
 {
 	outw(internal_addr, base_addr + TLAN_DIO_ADR);
 	return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
 
-} /* TLan_DioRead16 */
+}
 
 
 
 
-static inline u32 TLan_DioRead32(u16 base_addr, u16 internal_addr)
+static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr)
 {
 	outw(internal_addr, base_addr + TLAN_DIO_ADR);
 	return inl(base_addr + TLAN_DIO_DATA);
 
-} /* TLan_DioRead32 */
+}
 
 
 
 
-static inline void TLan_DioWrite8(u16 base_addr, u16 internal_addr, u8 data)
+static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data)
 {
 	outw(internal_addr, base_addr + TLAN_DIO_ADR);
 	outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
@@ -479,7 +483,7 @@
 
 
 
-static inline void TLan_DioWrite16(u16 base_addr, u16 internal_addr, u16 data)
+static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data)
 {
 	outw(internal_addr, base_addr + TLAN_DIO_ADR);
 	outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
@@ -489,16 +493,16 @@
 
 
 
-static inline void TLan_DioWrite32(u16 base_addr, u16 internal_addr, u32 data)
+static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data)
 {
 	outw(internal_addr, base_addr + TLAN_DIO_ADR);
 	outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
 
 }
 
-#define TLan_ClearBit( bit, port )	outb_p(inb_p(port) & ~bit, port)
-#define TLan_GetBit( bit, port )	((int) (inb_p(port) & bit))
-#define TLan_SetBit( bit, port )	outb_p(inb_p(port) | bit, port)
+#define tlan_clear_bit(bit, port)	outb_p(inb_p(port) & ~bit, port)
+#define tlan_get_bit(bit, port)	((int) (inb_p(port) & bit))
+#define tlan_set_bit(bit, port)	outb_p(inb_p(port) | bit, port)
 
 /*
  * given 6 bytes, view them as 8 6-bit numbers and return the XOR of those
@@ -506,37 +510,37 @@
  *
  * The original code was:
  *
- * u32	xor( u32 a, u32 b ) {	return ( ( a && ! b ) || ( ! a && b ) ); }
+ * u32	xor(u32 a, u32 b) { return ((a && !b) || (!a && b)); }
  *
- * #define XOR8( a, b, c, d, e, f, g, h )	\
- * 	xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) )
- * #define DA( a, bit )		( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) )
+ * #define XOR8(a, b, c, d, e, f, g, h)	\
+ *	xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)))))))
+ * #define DA(a, bit)		(((u8) a[bit/8]) & ((u8) (1 << bit%8)))
  *
- * 	hash  = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
- * 	              DA(a,30), DA(a,36), DA(a,42) );
- * 	hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
- * 		      DA(a,31), DA(a,37), DA(a,43) ) << 1;
- * 	hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
- * 		      DA(a,32), DA(a,38), DA(a,44) ) << 2;
- * 	hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
- * 		      DA(a,33), DA(a,39), DA(a,45) ) << 3;
- * 	hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
- * 	              DA(a,34), DA(a,40), DA(a,46) ) << 4;
- * 	hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
- * 	              DA(a,35), DA(a,41), DA(a,47) ) << 5;
+ *	hash  = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
+ *		      DA(a,30), DA(a,36), DA(a,42));
+ *	hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
+ *		      DA(a,31), DA(a,37), DA(a,43)) << 1;
+ *	hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
+ *		      DA(a,32), DA(a,38), DA(a,44)) << 2;
+ *	hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
+ *		      DA(a,33), DA(a,39), DA(a,45)) << 3;
+ *	hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
+ *		      DA(a,34), DA(a,40), DA(a,46)) << 4;
+ *	hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
+ *		      DA(a,35), DA(a,41), DA(a,47)) << 5;
  *
  */
-static inline u32 TLan_HashFunc( const u8 *a )
+static inline u32 tlan_hash_func(const u8 *a)
 {
-        u8     hash;
+	u8     hash;
 
-        hash = (a[0]^a[3]);             /* & 077 */
-        hash ^= ((a[0]^a[3])>>6);       /* & 003 */
-        hash ^= ((a[1]^a[4])<<2);       /* & 074 */
-        hash ^= ((a[1]^a[4])>>4);       /* & 017 */
-        hash ^= ((a[2]^a[5])<<4);       /* & 060 */
-        hash ^= ((a[2]^a[5])>>2);       /* & 077 */
+	hash = (a[0]^a[3]);		/* & 077 */
+	hash ^= ((a[0]^a[3])>>6);	/* & 003 */
+	hash ^= ((a[1]^a[4])<<2);	/* & 074 */
+	hash ^= ((a[1]^a[4])>>4);	/* & 017 */
+	hash ^= ((a[2]^a[5])<<4);	/* & 060 */
+	hash ^= ((a[2]^a[5])>>2);	/* & 077 */
 
-        return hash & 077;
+	return hash & 077;
 }
 #endif
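
To make the hash comment above concrete, here is a sketch of how a driver
might fold tlan_hash_func() into the 64-bit multicast filter. It is not part
of the patch; the multicast address is arbitrary, and TLAN_HASH_1/TLAN_HASH_2
are assumed to be the DIO hash registers declared elsewhere in this header.

	/* illustrative only: set the filter bit for one multicast address */
	static const u8 mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	u32 bit = tlan_hash_func(mc_addr);	/* 6-bit result, 0..63 */

	if (bit < 32)
		tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
				 tlan_dio_read32(dev->base_addr, TLAN_HASH_1) |
				 (1 << bit));
	else
		tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
				 tlan_dio_read32(dev->base_addr, TLAN_HASH_2) |
				 (1 << (bit - 32)));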
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b100bd5..55786a0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1142,7 +1142,7 @@
  * privs required. */
 static int set_offload(struct net_device *dev, unsigned long arg)
 {
-	unsigned int old_features, features;
+	u32 old_features, features;
 
 	old_features = dev->features;
 	/* Unset features, set them as we chew on the arg. */
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index a3c46f6..7fa5ec2 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -123,12 +123,11 @@
 #include <linux/in6.h>
 #include <linux/dma-mapping.h>
 #include <linux/firmware.h>
-#include <generated/utsrelease.h>
 
 #include "typhoon.h"
 
 MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
-MODULE_VERSION(UTS_RELEASE);
+MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE(FIRMWARE_NAME);
 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d776c4a..7113168 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1,7 +1,7 @@
 /*
  * cdc_ncm.c
  *
- * Copyright (C) ST-Ericsson 2010
+ * Copyright (C) ST-Ericsson 2010-2011
  * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
  * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
  *
@@ -54,7 +54,7 @@
 #include <linux/usb/usbnet.h>
 #include <linux/usb/cdc.h>
 
-#define	DRIVER_VERSION				"30-Nov-2010"
+#define	DRIVER_VERSION				"7-Feb-2011"
 
 /* CDC NCM subclass 3.2.1 */
 #define USB_CDC_NCM_NDP16_LENGTH_MIN		0x10
@@ -77,6 +77,9 @@
  */
 #define	CDC_NCM_DPT_DATAGRAMS_MAX		32
 
+/* Maximum number of IN datagrams in NTB */
+#define	CDC_NCM_DPT_DATAGRAMS_IN_MAX		0 /* unlimited */
+
 /* Restart the timer, if amount of datagrams is less than given value */
 #define	CDC_NCM_RESTART_TIMER_DATAGRAM_CNT	3
 
@@ -85,11 +88,6 @@
 	(sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
 	(CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
 
-struct connection_speed_change {
-	__le32	USBitRate; /* holds 3GPP downlink value, bits per second */
-	__le32	DSBitRate; /* holds 3GPP uplink value, bits per second */
-} __attribute__ ((packed));
-
 struct cdc_ncm_data {
 	struct usb_cdc_ncm_nth16 nth16;
 	struct usb_cdc_ncm_ndp16 ndp16;
@@ -198,10 +196,10 @@
 {
 	struct usb_cdc_notification req;
 	u32 val;
-	__le16 max_datagram_size;
 	u8 flags;
 	u8 iface_no;
 	int err;
+	u16 ntb_fmt_supported;
 
 	iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
 
@@ -223,6 +221,9 @@
 	ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
 	ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
 	ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
+	/* devices prior to NCM Errata shall set this field to zero */
+	ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
+	ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
 
 	if (ctx->func_desc != NULL)
 		flags = ctx->func_desc->bmNetworkCapabilities;
@@ -231,22 +232,58 @@
 
 	pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
 		 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
-		 "wNdpOutAlignment=%u flags=0x%x\n",
+		 "wNdpOutAlignment=%u wNtbOutMaxDatagrams=%u flags=0x%x\n",
 		 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
-		 ctx->tx_ndp_modulus, flags);
+		 ctx->tx_ndp_modulus, ctx->tx_max_datagrams, flags);
 
-	/* max count of tx datagrams without terminating NULL entry */
-	ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
+	/* max count of tx datagrams */
+	if ((ctx->tx_max_datagrams == 0) ||
+			(ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX))
+		ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
 
 	/* verify maximum size of received NTB in bytes */
-	if ((ctx->rx_max <
-	    (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
-	    (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) {
+	if (ctx->rx_max < USB_CDC_NCM_NTB_MIN_IN_SIZE) {
+		pr_debug("Using min receive length=%d\n",
+						USB_CDC_NCM_NTB_MIN_IN_SIZE);
+		ctx->rx_max = USB_CDC_NCM_NTB_MIN_IN_SIZE;
+	}
+
+	if (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX) {
 		pr_debug("Using default maximum receive length=%d\n",
 						CDC_NCM_NTB_MAX_SIZE_RX);
 		ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
 	}
 
+	/* inform device about NTB input size changes */
+	if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
+		req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+							USB_RECIP_INTERFACE;
+		req.bNotificationType = USB_CDC_SET_NTB_INPUT_SIZE;
+		req.wValue = 0;
+		req.wIndex = cpu_to_le16(iface_no);
+
+		if (flags & USB_CDC_NCM_NCAP_NTB_INPUT_SIZE) {
+			struct usb_cdc_ncm_ndp_input_size ndp_in_sz;
+
+			req.wLength = 8;
+			ndp_in_sz.dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+			ndp_in_sz.wNtbInMaxDatagrams =
+					cpu_to_le16(CDC_NCM_DPT_DATAGRAMS_MAX);
+			ndp_in_sz.wReserved = 0;
+			err = cdc_ncm_do_request(ctx, &req, &ndp_in_sz, 0, NULL,
+									1000);
+		} else {
+			__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
+
+			req.wLength = 4;
+			err = cdc_ncm_do_request(ctx, &req, &dwNtbInMaxSize, 0,
+								NULL, 1000);
+		}
+
+		if (err)
+			pr_debug("Setting NTB Input Size failed\n");
+	}
+
 	/* verify maximum size of transmitted NTB in bytes */
 	if ((ctx->tx_max <
 	    (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
@@ -297,47 +334,84 @@
 	/* additional configuration */
 
 	/* set CRC Mode */
-	req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
-	req.bNotificationType = USB_CDC_SET_CRC_MODE;
-	req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
-	req.wIndex = cpu_to_le16(iface_no);
-	req.wLength = 0;
+	if (flags & USB_CDC_NCM_NCAP_CRC_MODE) {
+		req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+							USB_RECIP_INTERFACE;
+		req.bNotificationType = USB_CDC_SET_CRC_MODE;
+		req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
+		req.wIndex = cpu_to_le16(iface_no);
+		req.wLength = 0;
 
-	err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
-	if (err)
-		pr_debug("Setting CRC mode off failed\n");
+		err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+		if (err)
+			pr_debug("Setting CRC mode off failed\n");
+	}
 
-	/* set NTB format */
-	req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
-	req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
-	req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
-	req.wIndex = cpu_to_le16(iface_no);
-	req.wLength = 0;
+	/* set NTB format, if both formats are supported */
+	if (ntb_fmt_supported & USB_CDC_NCM_NTH32_SIGN) {
+		req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+							USB_RECIP_INTERFACE;
+		req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
+		req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
+		req.wIndex = cpu_to_le16(iface_no);
+		req.wLength = 0;
 
-	err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
-	if (err)
-		pr_debug("Setting NTB format to 16-bit failed\n");
+		err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+		if (err)
+			pr_debug("Setting NTB format to 16-bit failed\n");
+	}
+
+	ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
 
 	/* set Max Datagram Size (MTU) */
-	req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
-	req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
-	req.wValue = 0;
-	req.wIndex = cpu_to_le16(iface_no);
-	req.wLength = cpu_to_le16(2);
+	if (flags & USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE) {
+		__le16 max_datagram_size;
+		u16 eth_max_sz = le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
 
-	err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000);
-	if (err) {
-		pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n",
-			 CDC_NCM_MIN_DATAGRAM_SIZE);
-		/* use default */
-		ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
-	} else {
-		ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+		req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN |
+							USB_RECIP_INTERFACE;
+		req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
+		req.wValue = 0;
+		req.wIndex = cpu_to_le16(iface_no);
+		req.wLength = cpu_to_le16(2);
 
-		if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
-			ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
-		else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
-			ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
+		err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL,
+									1000);
+		if (err) {
+			pr_debug("GET_MAX_DATAGRAM_SIZE failed, use size=%u\n",
+						CDC_NCM_MIN_DATAGRAM_SIZE);
+		} else {
+			ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+			/* Check Eth descriptor value */
+			if (eth_max_sz < CDC_NCM_MAX_DATAGRAM_SIZE) {
+				if (ctx->max_datagram_size > eth_max_sz)
+					ctx->max_datagram_size = eth_max_sz;
+			} else {
+				if (ctx->max_datagram_size >
+						CDC_NCM_MAX_DATAGRAM_SIZE)
+					ctx->max_datagram_size =
+						CDC_NCM_MAX_DATAGRAM_SIZE;
+			}
+
+			if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
+				ctx->max_datagram_size =
+					CDC_NCM_MIN_DATAGRAM_SIZE;
+
+			/* if value changed, update device */
+			req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT |
+							USB_RECIP_INTERFACE;
+			req.bNotificationType = USB_CDC_SET_MAX_DATAGRAM_SIZE;
+			req.wValue = 0;
+			req.wIndex = cpu_to_le16(iface_no);
+			req.wLength = 2;
+			max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
+
+			err = cdc_ncm_do_request(ctx, &req, &max_datagram_size,
+								0, NULL, 1000);
+			if (err)
+				pr_debug("SET_MAX_DATAGRAM_SIZE failed\n");
+		}
+
 	}
 
 	if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
@@ -466,19 +540,13 @@
 
 			ctx->ether_desc =
 					(const struct usb_cdc_ether_desc *)buf;
-
 			dev->hard_mtu =
 				le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
 
-			if (dev->hard_mtu <
-			    (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN))
-				dev->hard_mtu =
-					CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN;
-
-			else if (dev->hard_mtu >
-				 (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN))
-				dev->hard_mtu =
-					CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN;
+			if (dev->hard_mtu < CDC_NCM_MIN_DATAGRAM_SIZE)
+				dev->hard_mtu =	CDC_NCM_MIN_DATAGRAM_SIZE;
+			else if (dev->hard_mtu > CDC_NCM_MAX_DATAGRAM_SIZE)
+				dev->hard_mtu =	CDC_NCM_MAX_DATAGRAM_SIZE;
 			break;
 
 		case USB_CDC_NCM_TYPE:
@@ -628,13 +696,13 @@
 	u32 offset;
 	u32 last_offset;
 	u16 n = 0;
-	u8 timeout = 0;
+	u8 ready2send = 0;
 
 	/* if there is a remaining skb, it gets priority */
 	if (skb != NULL)
 		swap(skb, ctx->tx_rem_skb);
 	else
-		timeout = 1;
+		ready2send = 1;
 
 	/*
 	 * +----------------+
@@ -682,9 +750,10 @@
 
 	for (; n < ctx->tx_max_datagrams; n++) {
 		/* check if end of transmit buffer is reached */
-		if (offset >= ctx->tx_max)
+		if (offset >= ctx->tx_max) {
+			ready2send = 1;
 			break;
-
+		}
 		/* compute maximum buffer size */
 		rem = ctx->tx_max - offset;
 
@@ -711,9 +780,7 @@
 				}
 				ctx->tx_rem_skb = skb;
 				skb = NULL;
-
-				/* loop one more time */
-				timeout = 1;
+				ready2send = 1;
 			}
 			break;
 		}
@@ -756,7 +823,7 @@
 		ctx->tx_curr_last_offset = last_offset;
 		goto exit_no_skb;
 
-	} else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) {
+	} else if ((n < ctx->tx_max_datagrams) && (ready2send == 0)) {
 		/* wait for more frames */
 		/* push variables */
 		ctx->tx_curr_skb = skb_out;
@@ -813,7 +880,7 @@
 					cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
 	ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
 	ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
-	ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
+	ctx->tx_ncm.nth16.wNdpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
 							ctx->tx_ndp_modulus);
 
 	memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
@@ -825,13 +892,13 @@
 	rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
 					sizeof(struct usb_cdc_ncm_dpe16));
 	ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
-	ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */
+	ctx->tx_ncm.ndp16.wNextNdpIndex = 0; /* reserved */
 
-	memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex,
+	memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex,
 						&(ctx->tx_ncm.ndp16),
 						sizeof(ctx->tx_ncm.ndp16));
 
-	memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex +
+	memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wNdpIndex +
 					sizeof(ctx->tx_ncm.ndp16),
 					&(ctx->tx_ncm.dpe16),
 					(ctx->tx_curr_frame_num + 1) *
@@ -868,15 +935,19 @@
 	if (ctx->tx_timer_pending != 0) {
 		ctx->tx_timer_pending--;
 		restart = 1;
-	} else
+	} else {
 		restart = 0;
+	}
 
 	spin_unlock(&ctx->mtx);
 
-	if (restart)
+	if (restart) {
+		spin_lock(&ctx->mtx);
 		cdc_ncm_tx_timeout_start(ctx);
-	else if (ctx->netdev != NULL)
+		spin_unlock(&ctx->mtx);
+	} else if (ctx->netdev != NULL) {
 		usbnet_start_xmit(NULL, ctx->netdev);
+	}
 }
 
 static struct sk_buff *
@@ -900,7 +971,6 @@
 	skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
 	if (ctx->tx_curr_skb != NULL)
 		need_timer = 1;
-	spin_unlock(&ctx->mtx);
 
 	/* Start timer, if there is a remaining skb */
 	if (need_timer)
@@ -908,6 +978,8 @@
 
 	if (skb_out)
 		dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
+
+	spin_unlock(&ctx->mtx);
 	return skb_out;
 
 error:
@@ -956,7 +1028,7 @@
 		goto error;
 	}
 
-	temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex);
+	temp = le16_to_cpu(ctx->rx_ncm.nth16.wNdpIndex);
 	if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
 		pr_debug("invalid DPT16 index\n");
 		goto error;
@@ -1020,8 +1092,8 @@
 		if (((offset + temp) > actlen) ||
 		    (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
 			pr_debug("invalid frame detected (ignored)"
-				"offset[%u]=%u, length=%u, skb=%p\n",
-							x, offset, temp, skb_in);
+					"offset[%u]=%u, length=%u, skb=%p\n",
+					x, offset, temp, skb_in);
 			if (!x)
 				goto error;
 			break;
@@ -1043,10 +1115,10 @@
 
 static void
 cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
-		     struct connection_speed_change *data)
+		     struct usb_cdc_speed_change *data)
 {
-	uint32_t rx_speed = le32_to_cpu(data->USBitRate);
-	uint32_t tx_speed = le32_to_cpu(data->DSBitRate);
+	uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
+	uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
 
 	/*
 	 * Currently the USB-NET API does not support reporting the actual
@@ -1087,7 +1159,7 @@
 	/* test for split data in 8-byte chunks */
 	if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
 		cdc_ncm_speed_change(ctx,
-		      (struct connection_speed_change *)urb->transfer_buffer);
+		      (struct usb_cdc_speed_change *)urb->transfer_buffer);
 		return;
 	}
 
@@ -1115,12 +1187,12 @@
 		break;
 
 	case USB_CDC_NOTIFY_SPEED_CHANGE:
-		if (urb->actual_length <
-		    (sizeof(*event) + sizeof(struct connection_speed_change)))
+		if (urb->actual_length < (sizeof(*event) +
+					sizeof(struct usb_cdc_speed_change)))
 			set_bit(EVENT_STS_SPLIT, &dev->flags);
 		else
 			cdc_ncm_speed_change(ctx,
-				(struct connection_speed_change *) &event[1]);
+				(struct usb_cdc_speed_change *) &event[1]);
 		break;
 
 	default:
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index bed8fce..6d83812 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2628,15 +2628,15 @@
 
 static void hso_free_tiomget(struct hso_serial *serial)
 {
-	struct hso_tiocmget *tiocmget = serial->tiocmget;
+	struct hso_tiocmget *tiocmget;
+	if (!serial)
+		return;
+	tiocmget = serial->tiocmget;
 	if (tiocmget) {
-		if (tiocmget->urb) {
-			usb_free_urb(tiocmget->urb);
-			tiocmget->urb = NULL;
-		}
+		usb_free_urb(tiocmget->urb);
+		tiocmget->urb = NULL;
 		serial->tiocmget = NULL;
 		kfree(tiocmget);
-
 	}
 }
 
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 5e98643..7dc8497 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -406,6 +406,7 @@
 
 	if (fw->size > KAWETH_FIRMWARE_BUF_SIZE) {
 		err("Firmware too big: %zu", fw->size);
+		release_firmware(fw);
 		return -ENOSPC;
 	}
 	data_len = fw->size;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ed9a416..95c41d5 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -931,8 +931,10 @@
 		if (urb != NULL) {
 			clear_bit (EVENT_RX_MEMORY, &dev->flags);
 			status = usb_autopm_get_interface(dev->intf);
-			if (status < 0)
+			if (status < 0) {
+				usb_free_urb(urb);
 				goto fail_lowmem;
+			}
 			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
 				resched = 0;
 			usb_autopm_put_interface(dev->intf);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index cc83fa7..105d7f0 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -403,17 +403,6 @@
 	if (tb[IFLA_ADDRESS] == NULL)
 		random_ether_addr(dev->dev_addr);
 
-	if (tb[IFLA_IFNAME])
-		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
-	else
-		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
-
-	if (strchr(dev->name, '%')) {
-		err = dev_alloc_name(dev, dev->name);
-		if (err < 0)
-			goto err_alloc_name;
-	}
-
 	err = register_netdevice(dev);
 	if (err < 0)
 		goto err_register_dev;
@@ -433,7 +422,6 @@
 
 err_register_dev:
 	/* nothing to do */
-err_alloc_name:
 err_configure_peer:
 	unregister_netdevice(peer);
 	return err;
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 09cac70..0d6fec6 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -2923,6 +2923,7 @@
 static int velocity_set_wol(struct velocity_info *vptr)
 {
 	struct mac_regs __iomem *regs = vptr->mac_regs;
+	enum speed_opt spd_dpx = vptr->options.spd_dpx;
 	static u8 buf[256];
 	int i;
 
@@ -2968,6 +2969,12 @@
 
 	writew(0x0FFF, &regs->WOLSRClr);
 
+	if (spd_dpx == SPD_DPX_1000_FULL)
+		goto mac_done;
+
+	if (spd_dpx != SPD_DPX_AUTO)
+		goto advertise_done;
+
 	if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
 		if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
 			MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
@@ -2978,6 +2985,7 @@
 	if (vptr->mii_status & VELOCITY_SPEED_1000)
 		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
 
+advertise_done:
 	BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
 
 	{
@@ -2987,6 +2995,7 @@
 		writeb(GCR, &regs->CHIPGCR);
 	}
 
+mac_done:
 	BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
 	/* Turn on SWPTAG just before entering power mode */
 	BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index aa2e69b..d722753 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -361,7 +361,7 @@
 #define MAC_REG_CHIPGSR     0x9C
 #define MAC_REG_TESTCFG     0x9D
 #define MAC_REG_DEBUG       0x9E
-#define MAC_REG_CHIPGCR     0x9F
+#define MAC_REG_CHIPGCR     0x9F	/* Chip Operation and Diagnostic Control */
 #define MAC_REG_WOLCR0_SET  0xA0
 #define MAC_REG_WOLCR1_SET  0xA1
 #define MAC_REG_PWCFG_SET   0xA2
@@ -848,10 +848,10 @@
  *	Bits in CHIPGCR register
  */
 
-#define CHIPGCR_FCGMII      0x80	/* enable GMII mode */
-#define CHIPGCR_FCFDX       0x40
+#define CHIPGCR_FCGMII      0x80	/* force GMII (else MII only) */
+#define CHIPGCR_FCFDX       0x40	/* force full duplex */
 #define CHIPGCR_FCRESV      0x20
-#define CHIPGCR_FCMODE      0x10
+#define CHIPGCR_FCMODE      0x10	/* enable MAC forced mode */
 #define CHIPGCR_LPSOPT      0x08
 #define CHIPGCR_TM1US       0x04
 #define CHIPGCR_TM0US       0x02
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 90a23e4..82dba5a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -446,6 +446,20 @@
 	}
 }
 
+static void virtnet_napi_enable(struct virtnet_info *vi)
+{
+	napi_enable(&vi->napi);
+
+	/* If all buffers were filled by the other side before we napi_enabled,
+	 * we won't get another interrupt, so process any outstanding packets
+	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
+	 * We synchronize against interrupts via NAPI_STATE_SCHED. */
+	if (napi_schedule_prep(&vi->napi)) {
+		virtqueue_disable_cb(vi->rvq);
+		__napi_schedule(&vi->napi);
+	}
+}
+
 static void refill_work(struct work_struct *work)
 {
 	struct virtnet_info *vi;
@@ -454,7 +468,7 @@
 	vi = container_of(work, struct virtnet_info, refill.work);
 	napi_disable(&vi->napi);
 	still_empty = !try_fill_recv(vi, GFP_KERNEL);
-	napi_enable(&vi->napi);
+	virtnet_napi_enable(vi);
 
 	/* In theory, this can happen: if we don't get any buffers in
 	 * we will *never* try to fill again. */
@@ -638,16 +652,7 @@
 {
 	struct virtnet_info *vi = netdev_priv(dev);
 
-	napi_enable(&vi->napi);
-
-	/* If all buffers were filled by other side before we napi_enabled, we
-	 * won't get another interrupt, so process any outstanding packets
-	 * now.  virtnet_poll wants re-enable the queue, so we disable here.
-	 * We synchronize against interrupts via NAPI_STATE_SCHED */
-	if (napi_schedule_prep(&vi->napi)) {
-		virtqueue_disable_cb(vi->rvq);
-		__napi_schedule(&vi->napi);
-	}
+	virtnet_napi_enable(vi);
 	return 0;
 }
 
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d143e8b..cc14b4a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -48,6 +48,9 @@
 static int enable_mq = 1;
 static int irq_share_mode;
 
+static void
+vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
+
 /*
  *    Enable/Disable the given intr
  */
@@ -139,9 +142,13 @@
 {
 	u32 ret;
 	int i;
+	unsigned long flags;
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
 	adapter->link_speed = ret >> 16;
 	if (ret & 1) { /* Link is up. */
 		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
@@ -183,8 +190,10 @@
 
 	/* Check if there is an error on xmit/recv queues */
 	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
+		spin_lock(&adapter->cmd_lock);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_GET_QUEUE_STATUS);
+		spin_unlock(&adapter->cmd_lock);
 
 		for (i = 0; i < adapter->num_tx_queues; i++)
 			if (adapter->tqd_start[i].status.stopped)
@@ -804,30 +813,25 @@
 				   skb_transport_header(skb))->doff * 4;
 		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
 	} else {
-		unsigned int pull_size;
-
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
 
 			if (ctx->ipv4) {
 				struct iphdr *iph = (struct iphdr *)
 						    skb_network_header(skb);
-				if (iph->protocol == IPPROTO_TCP) {
-					pull_size = ctx->eth_ip_hdr_size +
-						    sizeof(struct tcphdr);
-
-					if (unlikely(!pskb_may_pull(skb,
-								pull_size))) {
-						goto err;
-					}
+				if (iph->protocol == IPPROTO_TCP)
 					ctx->l4_hdr_size = ((struct tcphdr *)
 					   skb_transport_header(skb))->doff * 4;
-				} else if (iph->protocol == IPPROTO_UDP) {
+				else if (iph->protocol == IPPROTO_UDP)
+					/*
+					 * Use tcp header size so that bytes to
+					 * be copied are more than required by
+					 * the device.
+					 */
 					ctx->l4_hdr_size =
-							sizeof(struct udphdr);
-				} else {
+							sizeof(struct tcphdr);
+				else
 					ctx->l4_hdr_size = 0;
-				}
 			} else {
 				/* for simplicity, don't copy L4 headers */
 				ctx->l4_hdr_size = 0;
@@ -1859,18 +1863,14 @@
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	struct Vmxnet3_DriverShared *shared = adapter->shared;
 	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+	unsigned long flags;
 
 	if (grp) {
 		/* add vlan rx stripping. */
 		if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
 			int i;
-			struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
 			adapter->vlan_grp = grp;
 
-			/* update FEATURES to device */
-			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
-			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
-					       VMXNET3_CMD_UPDATE_FEATURE);
 			/*
 			 *  Clear entire vfTable; then enable untagged pkts.
 			 *  Note: setting one entry in vfTable to non-zero turns
@@ -1880,8 +1880,10 @@
 				vfTable[i] = 0;
 
 			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
+			spin_lock_irqsave(&adapter->cmd_lock, flags);
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+			spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 		} else {
 			printk(KERN_ERR "%s: vlan_rx_register when device has "
 			       "no NETIF_F_HW_VLAN_RX\n", netdev->name);
@@ -1900,13 +1902,10 @@
 				 */
 				vfTable[i] = 0;
 			}
+			spin_lock_irqsave(&adapter->cmd_lock, flags);
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
-
-			/* update FEATURES to device */
-			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
-			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
-					       VMXNET3_CMD_UPDATE_FEATURE);
+			spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 		}
 	}
 }
@@ -1939,10 +1938,13 @@
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+	unsigned long flags;
 
 	VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
 
@@ -1951,10 +1953,13 @@
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+	unsigned long flags;
 
 	VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
 
@@ -1985,6 +1990,7 @@
 vmxnet3_set_mc(struct net_device *netdev)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
 	struct Vmxnet3_RxFilterConf *rxConf =
 					&adapter->shared->devRead.rxFilterConf;
 	u8 *new_table = NULL;
@@ -2020,6 +2026,7 @@
 		rxConf->mfTablePA = 0;
 	}
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	if (new_mode != rxConf->rxMode) {
 		rxConf->rxMode = cpu_to_le32(new_mode);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -2028,6 +2035,7 @@
 
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	kfree(new_table);
 }
@@ -2080,10 +2088,8 @@
 		devRead->misc.uptFeatures |= UPT1_F_LRO;
 		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
 	}
-	if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
-	    adapter->vlan_grp) {
+	if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
 		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
-	}
 
 	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
 	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
@@ -2168,6 +2174,8 @@
 	/* rx filter settings */
 	devRead->rxFilterConf.rxMode = 0;
 	vmxnet3_restore_vlan(adapter);
+	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
+
 	/* the rest are already zeroed */
 }
 
@@ -2177,6 +2185,7 @@
 {
 	int err, i;
 	u32 ret;
+	unsigned long flags;
 
 	dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
 		" ring sizes %u %u %u\n", adapter->netdev->name,
@@ -2206,9 +2215,11 @@
 			       adapter->shared_pa));
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
 			       adapter->shared_pa));
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_ACTIVATE_DEV);
 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	if (ret != 0) {
 		printk(KERN_ERR "Failed to activate dev %s: error %u\n",
@@ -2255,7 +2266,10 @@
 void
 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
 {
+	unsigned long flags;
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
 
@@ -2263,12 +2277,15 @@
 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
 {
 	int i;
+	unsigned long flags;
 	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
 		return 0;
 
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_QUIESCE_DEV);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 	vmxnet3_disable_all_intrs(adapter);
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2426,7 +2443,7 @@
 	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
 	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
 	ring0_size = (ring0_size + sz - 1) / sz * sz;
-	ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE /
+	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
 			   sz * sz);
 	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
 	comp_size = ring0_size + ring1_size;
@@ -2695,7 +2712,7 @@
 			break;
 		} else {
 			/* If fails to enable required number of MSI-x vectors
-			 * try enabling 3 of them. One each for rx, tx and event
+			 * try enabling the minimum number of vectors required.
 			 */
 			vectors = vector_threshold;
 			printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
@@ -2718,9 +2735,11 @@
 	u32 cfg;
 
 	/* intr settings */
+	spin_lock(&adapter->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_GET_CONF_INTR);
 	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+	spin_unlock(&adapter->cmd_lock);
 	adapter->intr.type = cfg & 0x3;
 	adapter->intr.mask_mode = (cfg >> 2) & 0x3;
 
@@ -2755,7 +2774,7 @@
 		 */
 		if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
-			    || adapter->num_rx_queues != 2) {
+			    || adapter->num_rx_queues != 1) {
 				adapter->share_intr = VMXNET3_INTR_TXSHARE;
 				printk(KERN_ERR "Number of rx queues : 1\n");
 				adapter->num_rx_queues = 1;
@@ -2905,6 +2924,7 @@
 	adapter->netdev = netdev;
 	adapter->pdev = pdev;
 
+	spin_lock_init(&adapter->cmd_lock);
 	adapter->shared = pci_alloc_consistent(adapter->pdev,
 			  sizeof(struct Vmxnet3_DriverShared),
 			  &adapter->shared_pa);
@@ -3108,11 +3128,15 @@
 	u8 *arpreq;
 	struct in_device *in_dev;
 	struct in_ifaddr *ifa;
+	unsigned long flags;
 	int i = 0;
 
 	if (!netif_running(netdev))
 		return 0;
 
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_disable(&adapter->rx_queue[i].napi);
+
 	vmxnet3_disable_all_intrs(adapter);
 	vmxnet3_free_irqs(adapter);
 	vmxnet3_free_intr_resources(adapter);
@@ -3188,8 +3212,10 @@
 	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
 								 pmConf));
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_PMCFG);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	pci_save_state(pdev);
 	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
@@ -3204,7 +3230,8 @@
 static int
 vmxnet3_resume(struct device *device)
 {
-	int err;
+	int err, i = 0;
+	unsigned long flags;
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -3232,10 +3259,14 @@
 
 	pci_enable_wake(pdev, PCI_D0, 0);
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_PMCFG);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 	vmxnet3_alloc_intr_resources(adapter);
 	vmxnet3_request_irqs(adapter);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_enable(&adapter->rx_queue[i].napi);
 	vmxnet3_enable_all_intrs(adapter);
 
 	return 0;
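
Most of the vmxnet3_drv.c changes above are one pattern repeated: every write to the VMXNET3_REG_CMD register (and any read of its result) is now wrapped in the new adapter->cmd_lock. A minimal sketch of that pattern follows; the register macros and cmd_lock come from the driver, while the helper name vmxnet3_issue_cmd() is purely illustrative.

/* Sketch only: serialize "write command, read result" so two contexts
 * (e.g. the ethtool path and the link-check path) cannot interleave on
 * the shared CMD register and read each other's result. */
static u32 vmxnet3_issue_cmd(struct vmxnet3_adapter *adapter, u32 cmd)
{
	unsigned long flags;
	u32 ret;

	spin_lock_irqsave(&adapter->cmd_lock, flags);
	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, cmd);
	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
	spin_unlock_irqrestore(&adapter->cmd_lock, flags);

	return ret;
}
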
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 8e17fc8..81254be 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -45,6 +45,7 @@
 vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
 
 	if (adapter->rxcsum != val) {
 		adapter->rxcsum = val;
@@ -56,8 +57,10 @@
 				adapter->shared->devRead.misc.uptFeatures &=
 				~UPT1_F_RXCSUM;
 
+			spin_lock_irqsave(&adapter->cmd_lock, flags);
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_FEATURE);
+			spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 		}
 	}
 	return 0;
@@ -68,76 +71,78 @@
 static const struct vmxnet3_stat_desc
 vmxnet3_tq_dev_stats[] = {
 	/* description,         offset */
-	{ "TSO pkts tx",        offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
-	{ "TSO bytes tx",       offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
-	{ "ucast pkts tx",      offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
-	{ "ucast bytes tx",     offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
-	{ "mcast pkts tx",      offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
-	{ "mcast bytes tx",     offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
-	{ "bcast pkts tx",      offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
-	{ "bcast bytes tx",     offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
-	{ "pkts tx err",        offsetof(struct UPT1_TxStats, pktsTxError) },
-	{ "pkts tx discard",    offsetof(struct UPT1_TxStats, pktsTxDiscard) },
+	{ "Tx Queue#",        0 },
+	{ "  TSO pkts tx",	offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
+	{ "  TSO bytes tx",	offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
+	{ "  ucast pkts tx",	offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
+	{ "  ucast bytes tx",	offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
+	{ "  mcast pkts tx",	offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
+	{ "  mcast bytes tx",	offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
+	{ "  bcast pkts tx",	offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
+	{ "  bcast bytes tx",	offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
+	{ "  pkts tx err",	offsetof(struct UPT1_TxStats, pktsTxError) },
+	{ "  pkts tx discard",	offsetof(struct UPT1_TxStats, pktsTxDiscard) },
 };
 
 /* per tq stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_tq_driver_stats[] = {
 	/* description,         offset */
-	{"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
-					drop_total) },
-	{ "   too many frags",  offsetof(struct vmxnet3_tq_driver_stats,
-					drop_too_many_frags) },
-	{ "   giant hdr",       offsetof(struct vmxnet3_tq_driver_stats,
-					drop_oversized_hdr) },
-	{ "   hdr err",         offsetof(struct vmxnet3_tq_driver_stats,
-					drop_hdr_inspect_err) },
-	{ "   tso",             offsetof(struct vmxnet3_tq_driver_stats,
-					drop_tso) },
-	{ "ring full",          offsetof(struct vmxnet3_tq_driver_stats,
-					tx_ring_full) },
-	{ "pkts linearized",    offsetof(struct vmxnet3_tq_driver_stats,
-					linearized) },
-	{ "hdr cloned",         offsetof(struct vmxnet3_tq_driver_stats,
-					copy_skb_header) },
-	{ "giant hdr",          offsetof(struct vmxnet3_tq_driver_stats,
-					oversized_hdr) },
+	{"  drv dropped tx total",	offsetof(struct vmxnet3_tq_driver_stats,
+						 drop_total) },
+	{ "     too many frags", offsetof(struct vmxnet3_tq_driver_stats,
+					  drop_too_many_frags) },
+	{ "     giant hdr",	offsetof(struct vmxnet3_tq_driver_stats,
+					 drop_oversized_hdr) },
+	{ "     hdr err",	offsetof(struct vmxnet3_tq_driver_stats,
+					 drop_hdr_inspect_err) },
+	{ "     tso",		offsetof(struct vmxnet3_tq_driver_stats,
+					 drop_tso) },
+	{ "  ring full",	offsetof(struct vmxnet3_tq_driver_stats,
+					 tx_ring_full) },
+	{ "  pkts linearized",	offsetof(struct vmxnet3_tq_driver_stats,
+					 linearized) },
+	{ "  hdr cloned",	offsetof(struct vmxnet3_tq_driver_stats,
+					 copy_skb_header) },
+	{ "  giant hdr",	offsetof(struct vmxnet3_tq_driver_stats,
+					 oversized_hdr) },
 };
 
 /* per rq stats maintained by the device */
 static const struct vmxnet3_stat_desc
 vmxnet3_rq_dev_stats[] = {
-	{ "LRO pkts rx",        offsetof(struct UPT1_RxStats, LROPktsRxOK) },
-	{ "LRO byte rx",        offsetof(struct UPT1_RxStats, LROBytesRxOK) },
-	{ "ucast pkts rx",      offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
-	{ "ucast bytes rx",     offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
-	{ "mcast pkts rx",      offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
-	{ "mcast bytes rx",     offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
-	{ "bcast pkts rx",      offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
-	{ "bcast bytes rx",     offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
-	{ "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
-	{ "pkts rx err",        offsetof(struct UPT1_RxStats, pktsRxError) },
+	{ "Rx Queue#",        0 },
+	{ "  LRO pkts rx",	offsetof(struct UPT1_RxStats, LROPktsRxOK) },
+	{ "  LRO byte rx",	offsetof(struct UPT1_RxStats, LROBytesRxOK) },
+	{ "  ucast pkts rx",	offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
+	{ "  ucast bytes rx",	offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
+	{ "  mcast pkts rx",	offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
+	{ "  mcast bytes rx",	offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
+	{ "  bcast pkts rx",	offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
+	{ "  bcast bytes rx",	offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
+	{ "  pkts rx OOB",	offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
+	{ "  pkts rx err",	offsetof(struct UPT1_RxStats, pktsRxError) },
 };
 
 /* per rq stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_rq_driver_stats[] = {
 	/* description,         offset */
-	{ "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
-					   drop_total) },
-	{ "   err",            offsetof(struct vmxnet3_rq_driver_stats,
-					drop_err) },
-	{ "   fcs",            offsetof(struct vmxnet3_rq_driver_stats,
-					drop_fcs) },
-	{ "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
-					rx_buf_alloc_failure) },
+	{ "  drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
+					     drop_total) },
+	{ "     err",		offsetof(struct vmxnet3_rq_driver_stats,
+					 drop_err) },
+	{ "     fcs",		offsetof(struct vmxnet3_rq_driver_stats,
+					 drop_fcs) },
+	{ "  rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
+					  rx_buf_alloc_failure) },
 };
 
 /* gloabl stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_global_stats[] = {
 	/* description,         offset */
-	{ "tx timeout count",   offsetof(struct vmxnet3_adapter,
+	{ "tx timeout count",	offsetof(struct vmxnet3_adapter,
 					 tx_timeout_count) }
 };
 
@@ -151,12 +156,15 @@
 	struct UPT1_TxStats *devTxStats;
 	struct UPT1_RxStats *devRxStats;
 	struct net_device_stats *net_stats = &netdev->stats;
+	unsigned long flags;
 	int i;
 
 	adapter = netdev_priv(netdev);
 
 	/* Collect the dev stats into the shared area */
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	memset(net_stats, 0, sizeof(*net_stats));
 	for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -193,12 +201,15 @@
 static int
 vmxnet3_get_sset_count(struct net_device *netdev, int sset)
 {
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	switch (sset) {
 	case ETH_SS_STATS:
-		return ARRAY_SIZE(vmxnet3_tq_dev_stats) +
-			ARRAY_SIZE(vmxnet3_tq_driver_stats) +
-			ARRAY_SIZE(vmxnet3_rq_dev_stats) +
-			ARRAY_SIZE(vmxnet3_rq_driver_stats) +
+		return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
+			ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
+		       adapter->num_tx_queues +
+		       (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
+			ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
+		       adapter->num_rx_queues +
 			ARRAY_SIZE(vmxnet3_global_stats);
 	default:
 		return -EOPNOTSUPP;
@@ -206,10 +217,16 @@
 }
 
 
+/* Should be multiple of 4 */
+#define NUM_TX_REGS	8
+#define NUM_RX_REGS	12
+
 static int
 vmxnet3_get_regs_len(struct net_device *netdev)
 {
-	return 20 * sizeof(u32);
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
+		adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
 }
 
 
@@ -240,29 +257,37 @@
 static void
 vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
 {
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	if (stringset == ETH_SS_STATS) {
-		int i;
+		int i, j;
+		for (j = 0; j < adapter->num_tx_queues; j++) {
+			for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
+				memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
+				       ETH_GSTRING_LEN);
+				buf += ETH_GSTRING_LEN;
+			}
+			for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
+			     i++) {
+				memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
+				       ETH_GSTRING_LEN);
+				buf += ETH_GSTRING_LEN;
+			}
+		}
 
-		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
-			memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
-			       ETH_GSTRING_LEN);
-			buf += ETH_GSTRING_LEN;
+		for (j = 0; j < adapter->num_rx_queues; j++) {
+			for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
+				memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
+				       ETH_GSTRING_LEN);
+				buf += ETH_GSTRING_LEN;
+			}
+			for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
+			     i++) {
+				memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
+				       ETH_GSTRING_LEN);
+				buf += ETH_GSTRING_LEN;
+			}
 		}
-		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) {
-			memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
-			       ETH_GSTRING_LEN);
-			buf += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
-			memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
-			       ETH_GSTRING_LEN);
-			buf += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) {
-			memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
-			       ETH_GSTRING_LEN);
-			buf += ETH_GSTRING_LEN;
-		}
+
 		for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
 			memcpy(buf, vmxnet3_global_stats[i].desc,
 				ETH_GSTRING_LEN);
@@ -277,6 +302,7 @@
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
 	u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
+	unsigned long flags;
 
 	if (data & ~ETH_FLAG_LRO)
 		return -EOPNOTSUPP;
@@ -292,8 +318,10 @@
 		else
 			adapter->shared->devRead.misc.uptFeatures &=
 							~UPT1_F_LRO;
+		spin_lock_irqsave(&adapter->cmd_lock, flags);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_UPDATE_FEATURE);
+		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 	}
 	return 0;
 }
@@ -303,30 +331,41 @@
 			  struct ethtool_stats *stats, u64  *buf)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
 	u8 *base;
 	int i;
 	int j = 0;
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	/* this does assume each counter is 64-bit wide */
-/* TODO change this for multiple queues */
+	for (j = 0; j < adapter->num_tx_queues; j++) {
+		base = (u8 *)&adapter->tqd_start[j].stats;
+		*buf++ = (u64)j;
+		for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
+			*buf++ = *(u64 *)(base +
+					  vmxnet3_tq_dev_stats[i].offset);
 
-	base = (u8 *)&adapter->tqd_start[j].stats;
-	for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
-		*buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
+		base = (u8 *)&adapter->tx_queue[j].stats;
+		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
+			*buf++ = *(u64 *)(base +
+					  vmxnet3_tq_driver_stats[i].offset);
+	}
 
-	base = (u8 *)&adapter->tx_queue[j].stats;
-	for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
-		*buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
+	for (j = 0; j < adapter->num_rx_queues; j++) {
+		base = (u8 *)&adapter->rqd_start[j].stats;
+		*buf++ = (u64) j;
+		for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
+			*buf++ = *(u64 *)(base +
+					  vmxnet3_rq_dev_stats[i].offset);
 
-	base = (u8 *)&adapter->rqd_start[j].stats;
-	for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
-		*buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
-
-	base = (u8 *)&adapter->rx_queue[j].stats;
-	for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
-		*buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
+		base = (u8 *)&adapter->rx_queue[j].stats;
+		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
+			*buf++ = *(u64 *)(base +
+					  vmxnet3_rq_driver_stats[i].offset);
+	}
 
 	base = (u8 *)adapter;
 	for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
@@ -339,7 +378,7 @@
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	u32 *buf = p;
-	int i = 0;
+	int i = 0, j = 0;
 
 	memset(p, 0, vmxnet3_get_regs_len(netdev));
 
@@ -348,31 +387,35 @@
 	/* Update vmxnet3_get_regs_len if we want to dump more registers */
 
 	/* make each ring use multiple of 16 bytes */
-/* TODO change this for multiple queues */
-	buf[0] = adapter->tx_queue[i].tx_ring.next2fill;
-	buf[1] = adapter->tx_queue[i].tx_ring.next2comp;
-	buf[2] = adapter->tx_queue[i].tx_ring.gen;
-	buf[3] = 0;
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
+		buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
+		buf[j++] = adapter->tx_queue[i].tx_ring.gen;
+		buf[j++] = 0;
 
-	buf[4] = adapter->tx_queue[i].comp_ring.next2proc;
-	buf[5] = adapter->tx_queue[i].comp_ring.gen;
-	buf[6] = adapter->tx_queue[i].stopped;
-	buf[7] = 0;
+		buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
+		buf[j++] = adapter->tx_queue[i].comp_ring.gen;
+		buf[j++] = adapter->tx_queue[i].stopped;
+		buf[j++] = 0;
+	}
 
-	buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill;
-	buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp;
-	buf[10] = adapter->rx_queue[i].rx_ring[0].gen;
-	buf[11] = 0;
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
+		buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
+		buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
+		buf[j++] = 0;
 
-	buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill;
-	buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp;
-	buf[14] = adapter->rx_queue[i].rx_ring[1].gen;
-	buf[15] = 0;
+		buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
+		buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
+		buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
+		buf[j++] = 0;
 
-	buf[16] = adapter->rx_queue[i].comp_ring.next2proc;
-	buf[17] = adapter->rx_queue[i].comp_ring.gen;
-	buf[18] = 0;
-	buf[19] = 0;
+		buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
+		buf[j++] = adapter->rx_queue[i].comp_ring.gen;
+		buf[j++] = 0;
+		buf[j++] = 0;
+	}
+
 }
 
 
@@ -574,6 +617,7 @@
 		      const struct ethtool_rxfh_indir *p)
 {
 	unsigned int i;
+	unsigned long flags;
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	struct UPT1_RSSConf *rssConf = adapter->rss_conf;
 
@@ -592,8 +636,10 @@
 	for (i = 0; i < rssConf->indTableSize; i++)
 		rssConf->indTable[i] = p->ring_index[i];
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_RSSIDT);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	return 0;
 
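
The ethtool rework above makes get_sset_count(), get_strings() and get_ethtool_stats() agree on a per-queue layout: one dev-stats plus one driver-stats block per tx queue, then the same per rx queue, then the globals, with the leading "Tx Queue#"/"Rx Queue#" entry of each dev-stats array acting as a pseudo-stat whose value is the queue index. A short sketch of the count all three callbacks must agree on, using only names from the hunks above:

/* Sketch: number of u64 values (and strings) exported per device.  If
 * this disagrees with what get_strings()/get_ethtool_stats() emit,
 * ethtool prints values against the wrong labels. */
static int vmxnet3_stats_count_sketch(struct vmxnet3_adapter *adapter)
{
	return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
		ARRAY_SIZE(vmxnet3_tq_driver_stats)) * adapter->num_tx_queues +
	       (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
		ARRAY_SIZE(vmxnet3_rq_driver_stats)) * adapter->num_rx_queues +
	       ARRAY_SIZE(vmxnet3_global_stats);
}
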
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 7fadeed..fb5d245 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.0.16.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.0.25.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01001000
+#define VMXNET3_DRIVER_VERSION_NUM      0x01001900
 
 #if defined(CONFIG_PCI_MSI)
 	/* RSS only makes sense if MSI-X is supported. */
@@ -289,7 +289,7 @@
 
 #define VMXNET3_LINUX_MAX_MSIX_VECT     (VMXNET3_DEVICE_MAX_TX_QUEUES + \
 					 VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
-#define VMXNET3_LINUX_MIN_MSIX_VECT     3    /* 1 for each : tx, rx and event */
+#define VMXNET3_LINUX_MIN_MSIX_VECT     2 /* 1 for tx-rx pair and 1 for event */
 
 
 struct vmxnet3_intr {
@@ -317,6 +317,7 @@
 	struct vmxnet3_rx_queue		rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
 	struct vlan_group		*vlan_grp;
 	struct vmxnet3_intr		intr;
+	spinlock_t			cmd_lock;
 	struct Vmxnet3_DriverShared	*shared;
 	struct Vmxnet3_PMConf		*pm_conf;
 	struct Vmxnet3_TxQueueDesc	*tqd_start;     /* all tx queue desc */
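
The VMXNET3_DRIVER_VERSION_NUM bump above follows the encoding described in the nearby comment: one byte per version component, so 0x01001900 corresponds to the 1.0.25.0 of the version string (0x19 == 25). A standalone sanity check (plain C, not driver code), assuming the most-significant byte is the major number:

#include <stdio.h>

int main(void)
{
	unsigned int v = 0x01001900;	/* VMXNET3_DRIVER_VERSION_NUM above */

	/* One byte per component, most significant first: prints 1.0.25.0 */
	printf("%u.%u.%u.%u\n",
	       (v >> 24) & 0xff, (v >> 16) & 0xff,
	       (v >> 8) & 0xff, v & 0xff);
	return 0;
}
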
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 01c05f5..e74e4b4 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -387,8 +387,8 @@
 		data1 = steer_ctrl = 0;
 
 		status = vxge_hw_vpath_fw_api(vpath,
-			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
 			VXGE_HW_FW_API_GET_EPROM_REV,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
 			0, &data0, &data1, &steer_ctrl);
 		if (status != VXGE_HW_OK)
 			break;
@@ -2868,6 +2868,8 @@
 	ring->rxd_init = attr->rxd_init;
 	ring->rxd_term = attr->rxd_term;
 	ring->buffer_mode = config->buffer_mode;
+	ring->tim_rti_cfg1_saved = vp->vpath->tim_rti_cfg1_saved;
+	ring->tim_rti_cfg3_saved = vp->vpath->tim_rti_cfg3_saved;
 	ring->rxds_limit = config->rxds_limit;
 
 	ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
@@ -3511,6 +3513,8 @@
 
 	/* apply "interrupts per txdl" attribute */
 	fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
+	fifo->tim_tti_cfg1_saved = vpath->tim_tti_cfg1_saved;
+	fifo->tim_tti_cfg3_saved = vpath->tim_tti_cfg3_saved;
 
 	if (fifo->config->intr)
 		fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
@@ -3690,7 +3694,7 @@
 	if (status != VXGE_HW_OK)
 		goto exit;
 
-	if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
+	if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
 	    (rts_table !=
 	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
 		*data1 = 0;
@@ -4377,6 +4381,8 @@
 		}
 
 		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+		vpath->tim_tti_cfg1_saved = val64;
+
 		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
 
 		if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4433,6 +4439,7 @@
 		}
 
 		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+		vpath->tim_tti_cfg3_saved = val64;
 	}
 
 	if (config->ring.enable == VXGE_HW_RING_ENABLE) {
@@ -4481,6 +4488,8 @@
 		}
 
 		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+		vpath->tim_rti_cfg1_saved = val64;
+
 		val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
 
 		if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4537,6 +4546,7 @@
 		}
 
 		writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+		vpath->tim_rti_cfg3_saved = val64;
 	}
 
 	val64 = 0;
@@ -4555,26 +4565,6 @@
 	return status;
 }
 
-void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
-{
-	struct __vxge_hw_virtualpath *vpath;
-	struct vxge_hw_vpath_reg __iomem *vp_reg;
-	struct vxge_hw_vp_config *config;
-	u64 val64;
-
-	vpath = &hldev->virtual_paths[vp_id];
-	vp_reg = vpath->vp_reg;
-	config = vpath->vp_config;
-
-	if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
-	    config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
-		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
-		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
-		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-	}
-}
-
 /*
  * __vxge_hw_vpath_initialize
  * This routine is the final phase of init which initializes the
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index e249e28..3c53aa7 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -682,6 +682,10 @@
 	u32				vsport_number;
 	u32				max_kdfc_db;
 	u32				max_nofl_db;
+	u64				tim_tti_cfg1_saved;
+	u64				tim_tti_cfg3_saved;
+	u64				tim_rti_cfg1_saved;
+	u64				tim_rti_cfg3_saved;
 
 	struct __vxge_hw_ring *____cacheline_aligned ringh;
 	struct __vxge_hw_fifo *____cacheline_aligned fifoh;
@@ -921,6 +925,9 @@
 	u32					doorbell_cnt;
 	u32					total_db_cnt;
 	u64					rxds_limit;
+	u32					rtimer;
+	u64					tim_rti_cfg1_saved;
+	u64					tim_rti_cfg3_saved;
 
 	enum vxge_hw_status (*callback)(
 			struct __vxge_hw_ring *ringh,
@@ -1000,6 +1007,9 @@
 	u32					per_txdl_space;
 	u32					vp_id;
 	u32					tx_intr_num;
+	u32					rtimer;
+	u64					tim_tti_cfg1_saved;
+	u64					tim_tti_cfg3_saved;
 
 	enum vxge_hw_status (*callback)(
 			struct __vxge_hw_fifo *fifo_handle,
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index c81a651..e40f619 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -371,9 +371,6 @@
 	struct vxge_hw_ring_rxd_info ext_info;
 	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
 		ring->ndev->name, __func__, __LINE__);
-	ring->pkts_processed = 0;
-
-	vxge_hw_ring_replenish(ringh);
 
 	do {
 		prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1588,6 +1585,36 @@
 	return ret;
 }
 
+/* Configure CI */
+static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
+{
+	int i = 0;
+
+	/* Enable CI for RTI */
+	if (vdev->config.intr_type == MSI_X) {
+		for (i = 0; i < vdev->no_of_vpath; i++) {
+			struct __vxge_hw_ring *hw_ring;
+
+			hw_ring = vdev->vpaths[i].ring.handle;
+			vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
+		}
+	}
+
+	/* Enable CI for TTI */
+	for (i = 0; i < vdev->no_of_vpath; i++) {
+		struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
+		vxge_hw_vpath_tti_ci_set(hw_fifo);
+		/*
+		 * For Inta (with or without napi), Set CI ON for only one
+		 * vpath. (Have only one free running timer).
+		 */
+		if ((vdev->config.intr_type == INTA) && (i == 0))
+			break;
+	}
+
+	return;
+}
+
 static int do_vxge_reset(struct vxgedev *vdev, int event)
 {
 	enum vxge_hw_status status;
@@ -1753,6 +1780,9 @@
 		netif_tx_wake_all_queues(vdev->ndev);
 	}
 
+	/* configure CI */
+	vxge_config_ci_for_tti_rti(vdev);
+
 out:
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s:%d  Exiting...", __func__, __LINE__);
@@ -1793,22 +1823,29 @@
  */
 static int vxge_poll_msix(struct napi_struct *napi, int budget)
 {
-	struct vxge_ring *ring =
-		container_of(napi, struct vxge_ring, napi);
+	struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
+	int pkts_processed;
 	int budget_org = budget;
-	ring->budget = budget;
 
+	ring->budget = budget;
+	ring->pkts_processed = 0;
 	vxge_hw_vpath_poll_rx(ring->handle);
+	pkts_processed = ring->pkts_processed;
 
 	if (ring->pkts_processed < budget_org) {
 		napi_complete(napi);
+
 		/* Re enable the Rx interrupts for the vpath */
 		vxge_hw_channel_msix_unmask(
 				(struct __vxge_hw_channel *)ring->handle,
 				ring->rx_vector_no);
+		mmiowb();
 	}
 
-	return ring->pkts_processed;
+	/* Copy and return the local variable: once the MSI-X vector is
+	 * unmasked above, the interrupt can fire immediately and preempt
+	 * this NAPI thread, changing ring->pkts_processed under us. */
+	return pkts_processed;
 }
 
 static int vxge_poll_inta(struct napi_struct *napi, int budget)
@@ -1824,6 +1861,7 @@
 	for (i = 0; i < vdev->no_of_vpath; i++) {
 		ring = &vdev->vpaths[i].ring;
 		ring->budget = budget;
+		ring->pkts_processed = 0;
 		vxge_hw_vpath_poll_rx(ring->handle);
 		pkts_processed += ring->pkts_processed;
 		budget -= ring->pkts_processed;
@@ -2054,6 +2092,7 @@
 					netdev_get_tx_queue(vdev->ndev, 0);
 			vpath->fifo.indicate_max_pkts =
 				vdev->config.fifo_indicate_max_pkts;
+			vpath->fifo.tx_vector_no = 0;
 			vpath->ring.rx_vector_no = 0;
 			vpath->ring.rx_csum = vdev->rx_csum;
 			vpath->ring.rx_hwts = vdev->rx_hwts;
@@ -2079,6 +2118,61 @@
 	return VXGE_HW_OK;
 }
 
+/**
+ *  adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
+ *  if the interrupts are not within a range
+ *  @fifo: pointer to transmit fifo structure
+ *  Description: The function changes the boundary timer and restriction
+ *  timer values depending on the traffic
+ *  Return Value: None
+ */
+static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
+{
+	fifo->interrupt_count++;
+	if (jiffies > fifo->jiffies + HZ / 100) {
+		struct __vxge_hw_fifo *hw_fifo = fifo->handle;
+
+		fifo->jiffies = jiffies;
+		if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
+		    hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
+			hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
+			vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+		} else if (hw_fifo->rtimer != 0) {
+			hw_fifo->rtimer = 0;
+			vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
+		}
+		fifo->interrupt_count = 0;
+	}
+}
+
+/**
+ *  adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
+ *  if the interrupts are not within a range
+ *  @ring: pointer to receive ring structure
+ *  Description: The function increases or decreases the packet counts within
+ *  the ranges of traffic utilization, if the interrupts due to this ring are
+ *  not within a fixed range.
+ *  Return Value: Nothing
+ */
+static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
+{
+	ring->interrupt_count++;
+	if (jiffies > ring->jiffies + HZ / 100) {
+		struct __vxge_hw_ring *hw_ring = ring->handle;
+
+		ring->jiffies = jiffies;
+		if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
+		    hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
+			hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
+			vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+		} else if (hw_ring->rtimer != 0) {
+			hw_ring->rtimer = 0;
+			vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
+		}
+		ring->interrupt_count = 0;
+	}
+}
+
 /*
  *  vxge_isr_napi
  *  @irq: the irq of the device.
@@ -2139,24 +2233,39 @@
 
 #ifdef CONFIG_PCI_MSI
 
-static irqreturn_t
-vxge_tx_msix_handle(int irq, void *dev_id)
+static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
 {
 	struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
 
+	adaptive_coalesce_tx_interrupts(fifo);
+
+	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
+				  fifo->tx_vector_no);
+
+	vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
+				   fifo->tx_vector_no);
+
 	VXGE_COMPLETE_VPATH_TX(fifo);
 
+	vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
+				    fifo->tx_vector_no);
+
+	mmiowb();
+
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t
-vxge_rx_msix_napi_handle(int irq, void *dev_id)
+static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
 {
 	struct vxge_ring *ring = (struct vxge_ring *)dev_id;
 
-	/* MSIX_IDX for Rx is 1 */
+	adaptive_coalesce_rx_interrupts(ring);
+
 	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
-					ring->rx_vector_no);
+				  ring->rx_vector_no);
+
+	vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
+				   ring->rx_vector_no);
 
 	napi_schedule(&ring->napi);
 	return IRQ_HANDLED;
@@ -2173,14 +2282,20 @@
 		VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
+		/* Reduce the chance of losing alarm interrupts by masking
+		 * the vector. A pending bit will be set if an alarm is
+		 * generated, and the interrupt will fire on unmask.
+		 */
 		vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
+		vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
+		mmiowb();
 
 		status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
 			vdev->exec_mode);
 		if (status == VXGE_HW_OK) {
-
 			vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
-					msix_id);
+						  msix_id);
+			mmiowb();
 			continue;
 		}
 		vxge_debug_intr(VXGE_ERR,
@@ -2299,6 +2414,9 @@
 			vpath->ring.rx_vector_no = (vpath->device_id *
 						VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
 
+			vpath->fifo.tx_vector_no = (vpath->device_id *
+						VXGE_HW_VPATH_MSIX_ACTIVE);
+
 			vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
 					       VXGE_ALARM_MSIX_ID);
 		}
@@ -2474,8 +2592,9 @@
 			"%s:vxge:INTA", vdev->ndev->name);
 		vxge_hw_device_set_intr_type(vdev->devh,
 			VXGE_HW_INTR_MODE_IRQLINE);
-		vxge_hw_vpath_tti_ci_set(vdev->devh,
-			vdev->vpaths[0].device_id);
+
+		vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
+
 		ret = request_irq((int) vdev->pdev->irq,
 			vxge_isr_napi,
 			IRQF_SHARED, vdev->desc[0], vdev);
@@ -2745,6 +2864,10 @@
 	}
 
 	netif_tx_start_all_queues(vdev->ndev);
+
+	/* configure CI */
+	vxge_config_ci_for_tti_rti(vdev);
+
 	goto out0;
 
 out2:
@@ -3348,7 +3471,7 @@
 		vxge_debug_init(VXGE_ERR,
 			"%s: vpath memory allocation failed",
 			vdev->ndev->name);
-		ret = -ENODEV;
+		ret = -ENOMEM;
 		goto _out1;
 	}
 
@@ -3369,11 +3492,11 @@
 	if (vdev->config.gro_enable)
 		ndev->features |= NETIF_F_GRO;
 
-	if (register_netdev(ndev)) {
+	ret = register_netdev(ndev);
+	if (ret) {
 		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
 			"%s: %s : device registration failed!",
 			ndev->name, __func__);
-		ret = -ENODEV;
 		goto _out2;
 	}
 
@@ -3444,6 +3567,11 @@
 	/* in 2.6 will call stop() if device is up */
 	unregister_netdev(dev);
 
+	kfree(vdev->vpaths);
+
+	/* we are safe to free it now */
+	free_netdev(dev);
+
 	vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
 			buf);
 	vxge_debug_entryexit(vdev->level_trace,	"%s: %s:%d  Exiting...", buf,
@@ -3799,7 +3927,7 @@
 		break;
 
 	case MSI_X:
-		device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
+		device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
 		break;
 	}
 
@@ -4335,10 +4463,10 @@
 		goto _exit1;
 	}
 
-	if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) {
+	ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
+	if (ret) {
 		vxge_debug_init(VXGE_ERR,
 			"%s : request regions failed", __func__);
-		ret = -ENODEV;
 		goto _exit1;
 	}
 
@@ -4446,7 +4574,7 @@
 			if (!img[i].is_valid)
 				break;
 			vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
-					"%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
+					"%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
 					VXGE_EPROM_IMG_MAJOR(img[i].version),
 					VXGE_EPROM_IMG_MINOR(img[i].version),
 					VXGE_EPROM_IMG_FIX(img[i].version),
@@ -4643,8 +4771,9 @@
 _exit5:
 	vxge_device_unregister(hldev);
 _exit4:
-	pci_disable_sriov(pdev);
+	pci_set_drvdata(pdev, NULL);
 	vxge_hw_device_terminate(hldev);
+	pci_disable_sriov(pdev);
 _exit3:
 	iounmap(attr.bar0);
 _exit2:
@@ -4655,7 +4784,7 @@
 	kfree(ll_config);
 	kfree(device_config);
 	driver_config->config_dev_cnt--;
-	pci_set_drvdata(pdev, NULL);
+	driver_config->total_dev_cnt--;
 	return ret;
 }
 
@@ -4668,45 +4797,34 @@
 static void __devexit vxge_remove(struct pci_dev *pdev)
 {
 	struct __vxge_hw_device *hldev;
-	struct vxgedev *vdev = NULL;
-	struct net_device *dev;
-	int i = 0;
+	struct vxgedev *vdev;
+	int i;
 
 	hldev = pci_get_drvdata(pdev);
-
 	if (hldev == NULL)
 		return;
 
-	dev = hldev->ndev;
-	vdev = netdev_priv(dev);
+	vdev = netdev_priv(hldev->ndev);
 
 	vxge_debug_entryexit(vdev->level_trace,	"%s:%d", __func__, __LINE__);
-
 	vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
 			__func__);
-	vxge_device_unregister(hldev);
 
-	for (i = 0; i < vdev->no_of_vpath; i++) {
+	for (i = 0; i < vdev->no_of_vpath; i++)
 		vxge_free_mac_add_list(&vdev->vpaths[i]);
-		vdev->vpaths[i].mcast_addr_cnt = 0;
-		vdev->vpaths[i].mac_addr_cnt = 0;
-	}
 
-	kfree(vdev->vpaths);
-
+	vxge_device_unregister(hldev);
+	pci_set_drvdata(pdev, NULL);
+	/* Do not call pci_disable_sriov here, as it will break child devices */
+	vxge_hw_device_terminate(hldev);
 	iounmap(vdev->bar0);
-
-	/* we are safe to free it now */
-	free_netdev(dev);
+	pci_release_region(pdev, 0);
+	pci_disable_device(pdev);
+	driver_config->config_dev_cnt--;
+	driver_config->total_dev_cnt--;
 
 	vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
 			__func__, __LINE__);
-
-	vxge_hw_device_terminate(hldev);
-
-	pci_disable_device(pdev);
-	pci_release_region(pdev, 0);
-	pci_set_drvdata(pdev, NULL);
 	vxge_debug_entryexit(vdev->level_trace,	"%s:%d  Exiting...", __func__,
 			     __LINE__);
 }
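
The adaptive_coalesce_*_interrupts() helpers above are a simple rate limiter: count interrupts per vector and, roughly every 10 ms, either program the adaptive restriction-timer value (when the count exceeds the Titan-1A cap) or drop it back to 0. Below is a condensed sketch of the intent of that decision for the rx side; the constants and the dynamic-rtimer call are from the hunks above, while the helper name and the exact structure are illustrative rather than line-for-line identical to the driver.

/* Sketch: one evaluation window of the rx interrupt moderation. */
static void rti_adapt_window_sketch(struct vxge_ring *ring)
{
	struct __vxge_hw_ring *hw_ring = ring->handle;
	u32 want;

	if (jiffies <= ring->jiffies + HZ / 100)
		return;				/* ~10 ms window not over yet */
	ring->jiffies = jiffies;

	/* Over the cap: slow interrupts down with the adaptive restriction
	 * timer; otherwise run with no restriction timer at all. */
	want = (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT) ?
		VXGE_RTI_RTIMER_ADAPT_VAL : 0;

	if (hw_ring->rtimer != want) {		/* only touch hw on change */
		hw_ring->rtimer = want;
		vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
	}
	ring->interrupt_count = 0;
}
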
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 5746fed..40474f0 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -59,11 +59,13 @@
 #define VXGE_TTI_LTIMER_VAL	1000
 #define VXGE_T1A_TTI_LTIMER_VAL	80
 #define VXGE_TTI_RTIMER_VAL	0
+#define VXGE_TTI_RTIMER_ADAPT_VAL	10
 #define VXGE_T1A_TTI_RTIMER_VAL	400
 #define VXGE_RTI_BTIMER_VAL	250
 #define VXGE_RTI_LTIMER_VAL	100
 #define VXGE_RTI_RTIMER_VAL	0
-#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
+#define VXGE_RTI_RTIMER_ADAPT_VAL	15
+#define VXGE_FIFO_INDICATE_MAX_PKTS	VXGE_DEF_FIFO_LENGTH
 #define VXGE_ISR_POLLING_CNT 	8
 #define VXGE_MAX_CONFIG_DEV	0xFF
 #define VXGE_EXEC_MODE_DISABLE	0
@@ -107,6 +109,14 @@
 #define RTI_T1A_RX_UFC_C	50
 #define RTI_T1A_RX_UFC_D	60
 
+/*
+ * The interrupt rate is maintained at 3k per second with the moderation
+ * parameters for most traffic but not all. This is the maximum interrupt
+ * count allowed per function with INTA or per vector in the case of
+ * MSI-X in a 10 millisecond time period. Enabled only for Titan 1A.
+ */
+#define VXGE_T1A_MAX_INTERRUPT_COUNT	100
+#define VXGE_T1A_MAX_TX_INTERRUPT_COUNT	200
 
 /* Milli secs timer period */
 #define VXGE_TIMER_DELAY		10000
@@ -247,6 +257,11 @@
 	int tx_steering_type;
 	int indicate_max_pkts;
 
+	/* Adaptive interrupt moderation parameters used in T1A */
+	unsigned long interrupt_count;
+	unsigned long jiffies;
+
+	u32 tx_vector_no;
 	/* Tx stats */
 	struct vxge_fifo_stats stats;
 } ____cacheline_aligned;
@@ -271,6 +286,10 @@
 	 */
 	int driver_id;
 
+	/* Adaptive interrupt moderation parameters used in T1A */
+	unsigned long interrupt_count;
+	unsigned long jiffies;
+
 	/* copy of the flag indicating whether rx_csum is to be used */
 	u32 rx_csum:1,
 	    rx_hwts:1;
@@ -286,7 +305,7 @@
 
 	int vlan_tag_strip;
 	struct vlan_group *vlgrp;
-	int rx_vector_no;
+	u32 rx_vector_no;
 	enum vxge_hw_status last_status;
 
 	/* Rx stats */
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 4c10d6c..8674f33 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -218,6 +218,68 @@
 	return status;
 }
 
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
+{
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+	struct vxge_hw_vp_config *config;
+	u64 val64;
+
+	if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
+		return;
+
+	vp_reg = fifo->vp_reg;
+	config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
+
+	if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
+		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
+		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+		fifo->tim_tti_cfg1_saved = val64;
+		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
+	}
+}
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
+{
+	u64 val64 = ring->tim_rti_cfg1_saved;
+
+	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+	ring->tim_rti_cfg1_saved = val64;
+	writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
+}
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
+{
+	u64 val64 = fifo->tim_tti_cfg3_saved;
+	u64 timer = (fifo->rtimer * 1000) / 272;
+
+	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+	if (timer)
+		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
+
+	writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
+	/* tti_cfg3_saved is not updated again because it is
+	 * initialized at one place only - init time.
+	 */
+}
+
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
+{
+	u64 val64 = ring->tim_rti_cfg3_saved;
+	u64 timer = (ring->rtimer * 1000) / 272;
+
+	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
+	if (timer)
+		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
+			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
+
+	writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
+	/* rti_cfg3_saved is not updated again because it is
+	 * initialized at one place only - init time.
+	 */
+}
+
 /**
  * vxge_hw_channel_msix_mask - Mask MSIX Vector.
  * @channeh: Channel for rx or tx handle
@@ -254,6 +316,23 @@
 }
 
 /**
+ * vxge_hw_channel_msix_clear - Clear (re-arm) the MSIX Vector.
+ * @channel: Channel for rx or tx handle
+ * @msix_id:  MSI ID
+ *
+ * The function clears the msix one-shot vector for the given msix_id,
+ * re-arming the interrupt when the device is configured in MSIX
+ * one-shot mode.
+ *
+ * Returns: None
+ */
+void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
+{
+	__vxge_hw_pio_mem_write32_upper(
+		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
+		&channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+}
+
+/**
  * vxge_hw_device_set_intr_type - Updates the configuration
  *		with new interrupt type.
  * @hldev: HW device handle.
@@ -2191,19 +2270,14 @@
 	if (vpath->hldev->config.intr_mode ==
 					VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+				VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
+				0, 32), &vp_reg->one_shot_vect0_en);
+		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
 				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
 				0, 32), &vp_reg->one_shot_vect1_en);
-	}
-
-	if (vpath->hldev->config.intr_mode ==
-		VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
 		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
 				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
 				0, 32), &vp_reg->one_shot_vect2_en);
-
-		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
-				VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
-				0, 32), &vp_reg->one_shot_vect3_en);
 	}
 }
 
@@ -2229,6 +2303,32 @@
 }
 
 /**
+ * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id:  MSI ID
+ *
+ * The function clears the msix interrupt for the given msix_id
+ *
+ * Returns: None
+ */
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+	struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+	if ((hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT))
+		__vxge_hw_pio_mem_write32_upper(
+			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
+	else
+		__vxge_hw_pio_mem_write32_upper(
+			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
+			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
+}
+
+/**
  * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
  * @vp: Virtual Path handle.
  * @msix_id:  MSI ID
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index d48486d..9d9dfda 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -2142,6 +2142,10 @@
  *  Virtual Paths
  */
 
+void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring);
+
+void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo);
+
 u32 vxge_hw_vpath_id(
 	struct __vxge_hw_vpath_handle *vpath_handle);
 
@@ -2245,6 +2249,8 @@
 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
 			int msix_id);
 
+void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id);
+
 void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
 
 void
@@ -2270,6 +2276,9 @@
 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
 
 void
+vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channelh, int msix_id);
+
+void
 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
 				 void **dtrh);
 
@@ -2282,7 +2291,8 @@
 int
 vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
 
-void
-vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo);
+
+void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring);
 
 #endif
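
The new *_msix_clear() helpers declared above exist because the driver now runs MSI-X in one-shot mode (see the switch to VXGE_HW_INTR_MODE_MSIX_ONE_SHOT in vxge-main.c): once a vector fires it is not re-armed until its bit is written to clr_msix_one_shot_vec, so each per-vector handler masks, clears (re-arms), does its work, and unmasks. A hedged sketch of that sequence, mirroring the rx handler in the vxge-main.c hunks; the handler name is illustrative, the calls are the ones declared above.

/* Sketch of a per-vector rx handler under MSI-X one-shot mode. */
static irqreturn_t vxge_rx_oneshot_isr_sketch(int irq, void *dev_id)
{
	struct vxge_ring *ring = dev_id;

	/* Mask first so a re-armed vector cannot fire while we work, */
	vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
				  ring->rx_vector_no);
	/* then re-arm the one-shot vector so the next event is latched. */
	vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
				   ring->rx_vector_no);

	napi_schedule(&ring->napi);	/* the poll loop unmasks when done */
	return IRQ_HANDLED;
}
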
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index ad2f99b..581e215 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -16,8 +16,8 @@
 
 #define VXGE_VERSION_MAJOR	"2"
 #define VXGE_VERSION_MINOR	"5"
-#define VXGE_VERSION_FIX	"1"
-#define VXGE_VERSION_BUILD	"22082"
+#define VXGE_VERSION_FIX	"2"
+#define VXGE_VERSION_BUILD	"22259"
 #define VXGE_VERSION_FOR	"k"
 
 #define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 5b6932c..166e9f7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -402,72 +402,6 @@
 }
 #endif
 
-/**
- * iwl3945_good_plcp_health - checks for plcp error.
- *
- * When the plcp error is exceeding the thresholds, reset the radio
- * to improve the throughput.
- */
-static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
-				struct iwl_rx_packet *pkt)
-{
-	bool rc = true;
-	struct iwl3945_notif_statistics current_stat;
-	int combined_plcp_delta;
-	unsigned int plcp_msec;
-	unsigned long plcp_received_jiffies;
-
-	if (priv->cfg->base_params->plcp_delta_threshold ==
-	    IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
-		IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
-		return rc;
-	}
-	memcpy(&current_stat, pkt->u.raw, sizeof(struct
-			iwl3945_notif_statistics));
-	/*
-	 * check for plcp_err and trigger radio reset if it exceeds
-	 * the plcp error threshold plcp_delta.
-	 */
-	plcp_received_jiffies = jiffies;
-	plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
-					(long) priv->plcp_jiffies);
-	priv->plcp_jiffies = plcp_received_jiffies;
-	/*
-	 * check to make sure plcp_msec is not 0 to prevent division
-	 * by zero.
-	 */
-	if (plcp_msec) {
-		combined_plcp_delta =
-			(le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
-			le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
-
-		if ((combined_plcp_delta > 0) &&
-			((combined_plcp_delta * 100) / plcp_msec) >
-			priv->cfg->base_params->plcp_delta_threshold) {
-			/*
-			 * if plcp_err exceed the threshold, the following
-			 * data is printed in csv format:
-			 *    Text: plcp_err exceeded %d,
-			 *    Received ofdm.plcp_err,
-			 *    Current ofdm.plcp_err,
-			 *    combined_plcp_delta,
-			 *    plcp_msec
-			 */
-			IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
-				"%u, %d, %u mSecs\n",
-				priv->cfg->base_params->plcp_delta_threshold,
-				le32_to_cpu(current_stat.rx.ofdm.plcp_err),
-				combined_plcp_delta, plcp_msec);
-			/*
-			 * Reset the RF radio due to the high plcp
-			 * error rate
-			 */
-			rc = false;
-		}
-	}
-	return rc;
-}
-
 void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
 		struct iwl_rx_mem_buffer *rxb)
 {
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 546de57..da1f121 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -120,6 +120,9 @@
 	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
 	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
 	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+
+	/* Statistics */
+	int rx_gso_checksum_fixup;
 };
 
 struct netfront_rx_info {
@@ -770,11 +773,29 @@
 	return cons;
 }
 
-static int skb_checksum_setup(struct sk_buff *skb)
+static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 {
 	struct iphdr *iph;
 	unsigned char *th;
 	int err = -EPROTO;
+	int recalculate_partial_csum = 0;
+
+	/*
+	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+	 * peers can fail to set NETRXF_csum_blank when sending a GSO
+	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+	 * recalculate the partial checksum.
+	 */
+	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+		struct netfront_info *np = netdev_priv(dev);
+		np->rx_gso_checksum_fixup++;
+		skb->ip_summed = CHECKSUM_PARTIAL;
+		recalculate_partial_csum = 1;
+	}
+
+	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
 
 	if (skb->protocol != htons(ETH_P_IP))
 		goto out;
@@ -788,9 +809,23 @@
 	switch (iph->protocol) {
 	case IPPROTO_TCP:
 		skb->csum_offset = offsetof(struct tcphdr, check);
+
+		if (recalculate_partial_csum) {
+			struct tcphdr *tcph = (struct tcphdr *)th;
+			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 skb->len - iph->ihl*4,
+							 IPPROTO_TCP, 0);
+		}
 		break;
 	case IPPROTO_UDP:
 		skb->csum_offset = offsetof(struct udphdr, check);
+
+		if (recalculate_partial_csum) {
+			struct udphdr *udph = (struct udphdr *)th;
+			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 skb->len - iph->ihl*4,
+							 IPPROTO_UDP, 0);
+		}
 		break;
 	default:
 		if (net_ratelimit())
@@ -829,13 +864,11 @@
 		/* Ethernet work: Delayed to here as it peeks the header. */
 		skb->protocol = eth_type_trans(skb, dev);
 
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			if (skb_checksum_setup(skb)) {
-				kfree_skb(skb);
-				packets_dropped++;
-				dev->stats.rx_errors++;
-				continue;
-			}
+		if (checksum_setup(dev, skb)) {
+			kfree_skb(skb);
+			packets_dropped++;
+			dev->stats.rx_errors++;
+			continue;
 		}
 
 		dev->stats.rx_packets++;
@@ -1632,12 +1665,59 @@
 	}
 }
 
+static const struct xennet_stat {
+	char name[ETH_GSTRING_LEN];
+	u16 offset;
+} xennet_stats[] = {
+	{
+		"rx_gso_checksum_fixup",
+		offsetof(struct netfront_info, rx_gso_checksum_fixup)
+	},
+};
+
+static int xennet_get_sset_count(struct net_device *dev, int string_set)
+{
+	switch (string_set) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(xennet_stats);
+	default:
+		return -EINVAL;
+	}
+}
+
+static void xennet_get_ethtool_stats(struct net_device *dev,
+				     struct ethtool_stats *stats, u64 * data)
+{
+	void *np = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+		data[i] = *(int *)(np + xennet_stats[i].offset);
+}
+
+static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       xennet_stats[i].name, ETH_GSTRING_LEN);
+		break;
+	}
+}
+
 static const struct ethtool_ops xennet_ethtool_ops =
 {
 	.set_tx_csum = ethtool_op_set_tx_csum,
 	.set_sg = xennet_set_sg,
 	.set_tso = xennet_set_tso,
 	.get_link = ethtool_op_get_link,
+
+	.get_sset_count = xennet_get_sset_count,
+	.get_ethtool_stats = xennet_get_ethtool_stats,
+	.get_strings = xennet_get_strings,
 };
 
 #ifdef CONFIG_SYSFS
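
checksum_setup() above covers backends that send GSO frames without NETRXF_csum_blank: such skbs are forced back to CHECKSUM_PARTIAL, counted in rx_gso_checksum_fixup, and their L4 checksum field is re-seeded with the pseudo-header sum so the eventual checksum completion comes out right. A minimal sketch of that re-seeding for TCP; the helper name is illustrative and iph/tcph are assumed to already point into a linear skb.

/* Sketch: put an skb back into the state a well-behaved sender of a
 * CHECKSUM_PARTIAL TCP frame would have left it in. */
static void reseed_partial_tcp_csum(struct sk_buff *skb,
				    struct iphdr *iph, struct tcphdr *tcph)
{
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_offset = offsetof(struct tcphdr, check);
	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					 skb->len - iph->ihl * 4,
					 IPPROTO_TCP, 0);
}
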
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index a2d9d1e..a848e02 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -678,7 +678,7 @@
 
 	/* Make sure we haven't left any pointers around in the wait
 	 * list. */
-	spin_lock (&port->waitlist_lock);
+	spin_lock_irq(&port->waitlist_lock);
 	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
 		if (dev->waitprev)
 			dev->waitprev->waitnext = dev->waitnext;
@@ -689,7 +689,7 @@
 		else
 			port->waittail = dev->waitprev;
 	}
-	spin_unlock (&port->waitlist_lock);
+	spin_unlock_irq(&port->waitlist_lock);
 
 	kfree(dev->state);
 	kfree(dev);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 8ecaac9..ea25e5b 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -23,6 +23,7 @@
 #include <linux/mm.h>
 #include <linux/fs.h>
 #include <linux/capability.h>
+#include <linux/security.h>
 #include <linux/pci-aspm.h>
 #include <linux/slab.h>
 #include "pci.h"
@@ -368,7 +369,7 @@
 	u8 *data = (u8*) buf;
 
 	/* Several chips lock up trying to read undefined config space */
-	if (cap_raised(filp->f_cred->cap_effective, CAP_SYS_ADMIN)) {
+	if (security_capable(filp->f_cred, CAP_SYS_ADMIN) == 0) {
 		size = dev->cfg_size;
 	} else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
 		size = 128;
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index dda7098..dc29348 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -31,7 +31,7 @@
 # PCI Express ASPM
 #
 config PCIEASPM
-	bool "PCI Express ASPM control" if EMBEDDED
+	bool "PCI Express ASPM control" if EXPERT
 	depends on PCI && PCIEPORTBUS
 	default y
 	help
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index de886f3..6e318ce 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -69,7 +69,7 @@
 config YENTA
 	tristate "CardBus yenta-compatible bridge support"
 	depends on PCI
-	select CARDBUS if !EMBEDDED
+	select CARDBUS if !EXPERT
 	select PCCARD_NONSTATIC if PCMCIA != n
 	---help---
 	  This option enables support for CardBus host bridges.  Virtually
@@ -84,27 +84,27 @@
 
 config YENTA_O2
 	default y
-	bool "Special initialization for O2Micro bridges" if EMBEDDED
+	bool "Special initialization for O2Micro bridges" if EXPERT
 	depends on YENTA
 
 config YENTA_RICOH
 	default y
-	bool "Special initialization for Ricoh bridges" if EMBEDDED
+	bool "Special initialization for Ricoh bridges" if EXPERT
 	depends on YENTA
 
 config YENTA_TI
 	default y
-	bool "Special initialization for TI and EnE bridges" if EMBEDDED
+	bool "Special initialization for TI and EnE bridges" if EXPERT
 	depends on YENTA
 
 config YENTA_ENE_TUNE
 	default y
-	bool "Auto-tune EnE bridges for CB cards" if EMBEDDED
+	bool "Auto-tune EnE bridges for CB cards" if EXPERT
 	depends on YENTA_TI && CARDBUS
 
 config YENTA_TOSHIBA
 	default y
-	bool "Special initialization for Toshiba ToPIC bridges" if EMBEDDED
+	bool "Special initialization for Toshiba ToPIC bridges" if EXPERT
 	depends on YENTA
 
 config PD6729
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 1752ef0..a91d510 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -26,7 +26,6 @@
 #include <linux/sfi.h>
 #include <asm/mrst.h>
 #include <asm/intel_scu_ipc.h>
-#include <asm/mrst.h>
 
 /* IPC defines the following message types */
 #define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */
@@ -161,7 +160,7 @@
 {
 	int i, nc, bytes, d;
 	u32 offset = 0;
-	u32 err = 0;
+	int err;
 	u8 cbuf[IPC_WWBUF_SIZE] = { };
 	u32 *wbuf = (u32 *)&cbuf;
 
@@ -404,7 +403,7 @@
  */
 int intel_scu_ipc_simple_command(int cmd, int sub)
 {
-	u32 err = 0;
+	int err;
 
 	mutex_lock(&ipclock);
 	if (ipcdev.pdev == NULL) {
@@ -434,8 +433,7 @@
 int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
 							u32 *out, int outlen)
 {
-	u32 err = 0;
-	int i = 0;
+	int i, err;
 
 	mutex_lock(&ipclock);
 	if (ipcdev.pdev == NULL) {
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index ba3231d..b93a032 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -128,6 +128,6 @@
 module_init(ipc_module_init);
 module_exit(ipc_module_exit);
 
-MODULE_LICENSE("GPL V2");
+MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Utility driver for intel scu ipc");
 MODULE_AUTHOR("Sreedhara <sreedhara.ds@intel.com>");
diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c
index 2728469..82583b0 100644
--- a/drivers/pps/clients/pps-ktimer.c
+++ b/drivers/pps/clients/pps-ktimer.c
@@ -46,8 +46,6 @@
 	/* First of all we get the time stamp... */
 	pps_get_ts(&ts);
 
-	dev_info(pps->dev, "PPS event at %lu\n", jiffies);
-
 	pps_event(pps, &ts, PPS_CAPTUREASSERT, NULL);
 
 	mod_timer(&ktimer, jiffies + HZ);
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
index 32221ef..c571d6d 100644
--- a/drivers/pps/clients/pps_parport.c
+++ b/drivers/pps/clients/pps_parport.c
@@ -163,7 +163,7 @@
 	}
 
 	device->pardev = parport_register_device(port, KBUILD_MODNAME,
-			NULL, NULL, parport_irq, 0, device);
+			NULL, NULL, parport_irq, PARPORT_FLAG_EXCL, device);
 	if (!device->pardev) {
 		pr_err("couldn't register with %s\n", port->name);
 		goto err_free;
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
index 5c32f8d..b93af3e 100644
--- a/drivers/pps/generators/pps_gen_parport.c
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -198,7 +198,7 @@
 	}
 
 	device.pardev = parport_register_device(port, KBUILD_MODNAME,
-			NULL, NULL, NULL, 0, &device);
+			NULL, NULL, NULL, PARPORT_FLAG_EXCL, &device);
 	if (!device.pardev) {
 		pr_err("couldn't register with %s\n", port->name);
 		return;
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 467e82b..a50391b 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -943,6 +943,8 @@
  * @port: Master port to send transactions
  * @destid: Current destination ID in network
  * @hopcount: Number of hops into the network
+ * @prev: previous rio_dev
+ * @prev_port: previous port number
  *
  * Recursively discovers a RIO network.  Transactions are sent via the
  * master port passed in @port.
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 9583cbc..c404b61 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -143,6 +143,7 @@
 	rtc->id = id;
 	rtc->ops = ops;
 	rtc->owner = owner;
+	rtc->irq_freq = 1;
 	rtc->max_user_freq = 64;
 	rtc->dev.parent = dev;
 	rtc->dev.class = rtc_class;
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 90384b9..cb2f072 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -16,6 +16,9 @@
 #include <linux/log2.h>
 #include <linux/workqueue.h>
 
+static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
+static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
+
 static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
 {
 	int err;
@@ -120,12 +123,18 @@
 	err = mutex_lock_interruptible(&rtc->ops_lock);
 	if (err)
 		return err;
-	alarm->enabled = rtc->aie_timer.enabled;
-	if (alarm->enabled)
+	if (rtc->ops == NULL)
+		err = -ENODEV;
+	else if (!rtc->ops->read_alarm)
+		err = -EINVAL;
+	else {
+		memset(alarm, 0, sizeof(struct rtc_wkalrm));
+		alarm->enabled = rtc->aie_timer.enabled;
 		alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
+	}
 	mutex_unlock(&rtc->ops_lock);
 
-	return 0;
+	return err;
 }
 EXPORT_SYMBOL_GPL(rtc_read_alarm);
 
@@ -175,16 +184,14 @@
 		return err;
 	if (rtc->aie_timer.enabled) {
 		rtc_timer_remove(rtc, &rtc->aie_timer);
-		rtc->aie_timer.enabled = 0;
 	}
 	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
 	rtc->aie_timer.period = ktime_set(0, 0);
 	if (alarm->enabled) {
-		rtc->aie_timer.enabled = 1;
-		rtc_timer_enqueue(rtc, &rtc->aie_timer);
+		err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
 	}
 	mutex_unlock(&rtc->ops_lock);
-	return 0;
+	return err;
 }
 EXPORT_SYMBOL_GPL(rtc_set_alarm);
 
@@ -195,16 +202,15 @@
 		return err;
 
 	if (rtc->aie_timer.enabled != enabled) {
-		if (enabled) {
-			rtc->aie_timer.enabled = 1;
-			rtc_timer_enqueue(rtc, &rtc->aie_timer);
-		} else {
+		if (enabled)
+			err = rtc_timer_enqueue(rtc, &rtc->aie_timer);
+		else
 			rtc_timer_remove(rtc, &rtc->aie_timer);
-			rtc->aie_timer.enabled = 0;
-		}
 	}
 
-	if (!rtc->ops)
+	if (err)
+		/* nothing */;
+	else if (!rtc->ops)
 		err = -ENODEV;
 	else if (!rtc->ops->alarm_irq_enable)
 		err = -EINVAL;
@@ -222,6 +228,12 @@
 	if (err)
 		return err;
 
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+	if (enabled == 0 && rtc->uie_irq_active) {
+		mutex_unlock(&rtc->ops_lock);
+		return rtc_dev_update_irq_enable_emul(rtc, 0);
+	}
+#endif
 	/* make sure we're changing state */
 	if (rtc->uie_rtctimer.enabled == enabled)
 		goto out;
@@ -235,15 +247,22 @@
 		now = rtc_tm_to_ktime(tm);
 		rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
 		rtc->uie_rtctimer.period = ktime_set(1, 0);
-		rtc->uie_rtctimer.enabled = 1;
-		rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
-	} else {
+		err = rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
+	} else
 		rtc_timer_remove(rtc, &rtc->uie_rtctimer);
-		rtc->uie_rtctimer.enabled = 0;
-	}
 
 out:
 	mutex_unlock(&rtc->ops_lock);
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+	/*
+	 * Enable emulation if the driver did not provide
+	 * the update_irq_enable function pointer or if it returned
+	 * -EINVAL to signal that it has been configured without
+	 * interrupts or that they are not available at the moment.
+	 */
+	if (err == -EINVAL)
+		err = rtc_dev_update_irq_enable_emul(rtc, enabled);
+#endif
 	return err;
 
 }
@@ -259,7 +278,7 @@
  *
  * Triggers the registered irq_task function callback.
  */
-static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
+void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
 {
 	unsigned long flags;
 
@@ -460,6 +479,9 @@
 	int err = 0;
 	unsigned long flags;
 
+	if (freq <= 0)
+		return -EINVAL;
+
 	spin_lock_irqsave(&rtc->irq_task_lock, flags);
 	if (rtc->irq_task != NULL && task == NULL)
 		err = -EBUSY;
@@ -488,10 +510,13 @@
  * Enqueues a timer onto the rtc devices timerqueue and sets
  * the next alarm event appropriately.
  *
+ * Sets the enabled bit on the added timer.
+ *
  * Must hold ops_lock for proper serialization of timerqueue
  */
-void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
 {
+	timer->enabled = 1;
 	timerqueue_add(&rtc->timerqueue, &timer->node);
 	if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
 		struct rtc_wkalrm alarm;
@@ -501,7 +526,13 @@
 		err = __rtc_set_alarm(rtc, &alarm);
 		if (err == -ETIME)
 			schedule_work(&rtc->irqwork);
+		else if (err) {
+			timerqueue_del(&rtc->timerqueue, &timer->node);
+			timer->enabled = 0;
+			return err;
+		}
 	}
+	return 0;
 }
 
 /**
@@ -512,13 +543,15 @@
  * Removes a timer onto the rtc devices timerqueue and sets
  * the next alarm event appropriately.
  *
+ * Clears the enabled bit on the removed timer.
+ *
  * Must hold ops_lock for proper serialization of timerqueue
  */
-void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
+static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
 {
 	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
 	timerqueue_del(&rtc->timerqueue, &timer->node);
-
+	timer->enabled = 0;
 	if (next == &timer->node) {
 		struct rtc_wkalrm alarm;
 		int err;
@@ -626,8 +659,7 @@
 	timer->node.expires = expires;
 	timer->period = period;
 
-	timer->enabled = 1;
-	rtc_timer_enqueue(rtc, timer);
+	ret = rtc_timer_enqueue(rtc, timer);
 
 	mutex_unlock(&rtc->ops_lock);
 	return ret;
@@ -645,7 +677,6 @@
 	mutex_lock(&rtc->ops_lock);
 	if (timer->enabled)
 		rtc_timer_remove(rtc, timer);
-	timer->enabled = 0;
 	mutex_unlock(&rtc->ops_lock);
 	return ret;
 }
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index b2752b6..e725d51 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -134,36 +134,29 @@
 	return ret;
 }
 
-static int at32_rtc_ioctl(struct device *dev, unsigned int cmd,
-			unsigned long arg)
+static int at32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
 	int ret = 0;
 
 	spin_lock_irq(&rtc->lock);
 
-	switch (cmd) {
-	case RTC_AIE_ON:
+	if (enabled) {
 		if (rtc_readl(rtc, VAL) > rtc->alarm_time) {
 			ret = -EINVAL;
-			break;
+			goto out;
 		}
 		rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
 				| RTC_BIT(CTRL_TOPEN));
 		rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
 		rtc_writel(rtc, IER, RTC_BIT(IER_TOPI));
-		break;
-	case RTC_AIE_OFF:
+	} else {
 		rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
 				& ~RTC_BIT(CTRL_TOPEN));
 		rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
 		rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
-		break;
-	default:
-		ret = -ENOIOCTLCMD;
-		break;
 	}
-
+out:
 	spin_unlock_irq(&rtc->lock);
 
 	return ret;
@@ -195,11 +188,11 @@
 }
 
 static struct rtc_class_ops at32_rtc_ops = {
-	.ioctl		= at32_rtc_ioctl,
 	.read_time	= at32_rtc_readtime,
 	.set_time	= at32_rtc_settime,
 	.read_alarm	= at32_rtc_readalarm,
 	.set_alarm	= at32_rtc_setalarm,
+	.alarm_irq_enable = at32_rtc_alarm_irq_enable,
 };
 
 static int __init at32_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index bc8bbca..26d1cf5 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -195,13 +195,6 @@
 
 	/* important:  scrub old status before enabling IRQs */
 	switch (cmd) {
-	case RTC_AIE_OFF:	/* alarm off */
-		at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
-		break;
-	case RTC_AIE_ON:	/* alarm on */
-		at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
-		at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
-		break;
 	case RTC_UIE_OFF:	/* update off */
 		at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV);
 		break;
@@ -217,6 +210,18 @@
 	return ret;
 }
 
+static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	pr_debug("%s(): enabled=%08x\n", __func__, enabled);
+
+	if (enabled) {
+		at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+		at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
+	} else
+		at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+
+	return 0;
+}
 /*
  * Provide additional RTC information in /proc/driver/rtc
  */
@@ -270,6 +275,7 @@
 	.read_alarm	= at91_rtc_readalarm,
 	.set_alarm	= at91_rtc_setalarm,
 	.proc		= at91_rtc_proc,
+	.alarm_irq_enable = at91_rtc_alarm_irq_enable,
 };
 
 /*
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index f677e07..c36749e 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -229,12 +229,6 @@
 	dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr);
 
 	switch (cmd) {
-	case RTC_AIE_OFF:		/* alarm off */
-		rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
-		break;
-	case RTC_AIE_ON:		/* alarm on */
-		rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
-		break;
 	case RTC_UIE_OFF:		/* update off */
 		rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
 		break;
@@ -249,6 +243,19 @@
 	return ret;
 }
 
+static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct sam9_rtc *rtc = dev_get_drvdata(dev);
+	u32 mr = rtt_readl(rtc, MR);
+
+	dev_dbg(dev, "alarm_irq_enable: enabled=%08x, mr %08x\n", enabled, mr);
+	if (enabled)
+		rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
+	else
+		rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
+	return 0;
+}
+
 /*
  * Provide additional RTC information in /proc/driver/rtc
  */
@@ -302,6 +309,7 @@
 	.read_alarm	= at91_rtc_readalarm,
 	.set_alarm	= at91_rtc_setalarm,
 	.proc		= at91_rtc_proc,
+	.alarm_irq_enable = at91_rtc_alarm_irq_enable,
 };
 
 /*
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index b4b6087..17971d9 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -259,15 +259,6 @@
 		bfin_rtc_int_clear(~RTC_ISTAT_SEC);
 		break;
 
-	case RTC_AIE_ON:
-		dev_dbg_stamp(dev);
-		bfin_rtc_int_set_alarm(rtc);
-		break;
-	case RTC_AIE_OFF:
-		dev_dbg_stamp(dev);
-		bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
-		break;
-
 	default:
 		dev_dbg_stamp(dev);
 		ret = -ENOIOCTLCMD;
@@ -276,6 +267,17 @@
 	return ret;
 }
 
+static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct bfin_rtc *rtc = dev_get_drvdata(dev);
+
+	dev_dbg_stamp(dev);
+	if (enabled)
+		bfin_rtc_int_set_alarm(rtc);
+	else
+		bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
+
+	return 0;
+}
+
 static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
 	struct bfin_rtc *rtc = dev_get_drvdata(dev);
@@ -362,6 +364,7 @@
 	.read_alarm    = bfin_rtc_read_alarm,
 	.set_alarm     = bfin_rtc_set_alarm,
 	.proc          = bfin_rtc_proc,
+	.alarm_irq_enable = bfin_rtc_alarm_irq_enable,
 };
 
 static int __devinit bfin_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 212b16e..d0e06ed 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -46,6 +46,105 @@
 	return err;
 }
 
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+/*
+ * Routine to poll RTC seconds field for change as often as possible,
+ * after first RTC_UIE use timer to reduce polling
+ */
+static void rtc_uie_task(struct work_struct *work)
+{
+	struct rtc_device *rtc =
+		container_of(work, struct rtc_device, uie_task);
+	struct rtc_time tm;
+	int num = 0;
+	int err;
+
+	err = rtc_read_time(rtc, &tm);
+
+	spin_lock_irq(&rtc->irq_lock);
+	if (rtc->stop_uie_polling || err) {
+		rtc->uie_task_active = 0;
+	} else if (rtc->oldsecs != tm.tm_sec) {
+		num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
+		rtc->oldsecs = tm.tm_sec;
+		rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
+		rtc->uie_timer_active = 1;
+		rtc->uie_task_active = 0;
+		add_timer(&rtc->uie_timer);
+	} else if (schedule_work(&rtc->uie_task) == 0) {
+		rtc->uie_task_active = 0;
+	}
+	spin_unlock_irq(&rtc->irq_lock);
+	if (num)
+		rtc_handle_legacy_irq(rtc, num, RTC_UF);
+}
+static void rtc_uie_timer(unsigned long data)
+{
+	struct rtc_device *rtc = (struct rtc_device *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rtc->irq_lock, flags);
+	rtc->uie_timer_active = 0;
+	rtc->uie_task_active = 1;
+	if ((schedule_work(&rtc->uie_task) == 0))
+		rtc->uie_task_active = 0;
+	spin_unlock_irqrestore(&rtc->irq_lock, flags);
+}
+
+static int clear_uie(struct rtc_device *rtc)
+{
+	spin_lock_irq(&rtc->irq_lock);
+	if (rtc->uie_irq_active) {
+		rtc->stop_uie_polling = 1;
+		if (rtc->uie_timer_active) {
+			spin_unlock_irq(&rtc->irq_lock);
+			del_timer_sync(&rtc->uie_timer);
+			spin_lock_irq(&rtc->irq_lock);
+			rtc->uie_timer_active = 0;
+		}
+		if (rtc->uie_task_active) {
+			spin_unlock_irq(&rtc->irq_lock);
+			flush_scheduled_work();
+			spin_lock_irq(&rtc->irq_lock);
+		}
+		rtc->uie_irq_active = 0;
+	}
+	spin_unlock_irq(&rtc->irq_lock);
+	return 0;
+}
+
+static int set_uie(struct rtc_device *rtc)
+{
+	struct rtc_time tm;
+	int err;
+
+	err = rtc_read_time(rtc, &tm);
+	if (err)
+		return err;
+	spin_lock_irq(&rtc->irq_lock);
+	if (!rtc->uie_irq_active) {
+		rtc->uie_irq_active = 1;
+		rtc->stop_uie_polling = 0;
+		rtc->oldsecs = tm.tm_sec;
+		rtc->uie_task_active = 1;
+		if (schedule_work(&rtc->uie_task) == 0)
+			rtc->uie_task_active = 0;
+	}
+	rtc->irq_data = 0;
+	spin_unlock_irq(&rtc->irq_lock);
+	return 0;
+}
+
+int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
+{
+	if (enabled)
+		return set_uie(rtc);
+	else
+		return clear_uie(rtc);
+}
+EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);
+
+#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
 
 static ssize_t
 rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
@@ -154,19 +253,7 @@
 	if (err)
 		goto done;
 
-	/* try the driver's ioctl interface */
-	if (ops->ioctl) {
-		err = ops->ioctl(rtc->dev.parent, cmd, arg);
-		if (err != -ENOIOCTLCMD) {
-			mutex_unlock(&rtc->ops_lock);
-			return err;
-		}
-	}
-
-	/* if the driver does not provide the ioctl interface
-	 * or if that particular ioctl was not implemented
-	 * (-ENOIOCTLCMD), we will try to emulate here.
-	 *
+	/*
 	 * Drivers *SHOULD NOT* provide ioctl implementations
 	 * for these requests.  Instead, provide methods to
 	 * support the following code, so that the RTC's main
@@ -329,7 +416,12 @@
 		return err;
 
 	default:
-		err = -ENOTTY;
+		/* Finally try the driver's ioctl interface */
+		if (ops->ioctl) {
+			err = ops->ioctl(rtc->dev.parent, cmd, arg);
+			if (err == -ENOIOCTLCMD)
+				err = -ENOTTY;
+		}
 		break;
 	}
 
@@ -394,6 +486,11 @@
 
 	rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);
 
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+	INIT_WORK(&rtc->uie_task, rtc_uie_task);
+	setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
+#endif
+
 	cdev_init(&rtc->char_dev, &rtc_dev_fops);
 	rtc->char_dev.owner = rtc->owner;
 }
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index bf430f9..60ce696 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -40,6 +40,26 @@
 	__raw_writel(data, &priv->rtcregs[reg]);
 }
 
+
+static int ds1286_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct ds1286_priv *priv = dev_get_drvdata(dev);
+	unsigned long flags;
+	unsigned char val;
+
+	/* Allow or mask alarm interrupts */
+	spin_lock_irqsave(&priv->lock, flags);
+	val = ds1286_rtc_read(priv, RTC_CMD);
+	if (enabled)
+		val &=  ~RTC_TDM;
+	else
+		val |=  RTC_TDM;
+	ds1286_rtc_write(priv, val, RTC_CMD);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return 0;
+}
+
 #ifdef CONFIG_RTC_INTF_DEV
 
 static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
@@ -49,22 +69,6 @@
 	unsigned char val;
 
 	switch (cmd) {
-	case RTC_AIE_OFF:
-		/* Mask alarm int. enab. bit	*/
-		spin_lock_irqsave(&priv->lock, flags);
-		val = ds1286_rtc_read(priv, RTC_CMD);
-		val |=  RTC_TDM;
-		ds1286_rtc_write(priv, val, RTC_CMD);
-		spin_unlock_irqrestore(&priv->lock, flags);
-		break;
-	case RTC_AIE_ON:
-		/* Allow alarm interrupts.	*/
-		spin_lock_irqsave(&priv->lock, flags);
-		val = ds1286_rtc_read(priv, RTC_CMD);
-		val &=  ~RTC_TDM;
-		ds1286_rtc_write(priv, val, RTC_CMD);
-		spin_unlock_irqrestore(&priv->lock, flags);
-		break;
 	case RTC_WIE_OFF:
 		/* Mask watchdog int. enab. bit	*/
 		spin_lock_irqsave(&priv->lock, flags);
@@ -316,12 +320,13 @@
 }
 
 static const struct rtc_class_ops ds1286_ops = {
-	.ioctl   	= ds1286_ioctl,
-	.proc   	= ds1286_proc,
+	.ioctl		= ds1286_ioctl,
+	.proc		= ds1286_proc,
 	.read_time	= ds1286_read_time,
 	.set_time	= ds1286_set_time,
 	.read_alarm	= ds1286_read_alarm,
 	.set_alarm	= ds1286_set_alarm,
+	.alarm_irq_enable = ds1286_alarm_irq_enable,
 };
 
 static int __devinit ds1286_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 077af1d..57fbcc1 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -139,49 +139,32 @@
  * Interface to RTC framework
  */
 
-#ifdef CONFIG_RTC_INTF_DEV
-
-/*
- * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl)
- */
-static int ds1305_ioctl(struct device *dev, unsigned cmd, unsigned long arg)
+static int ds1305_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct ds1305	*ds1305 = dev_get_drvdata(dev);
 	u8		buf[2];
-	int		status = -ENOIOCTLCMD;
+	long		err = -EINVAL;
 
 	buf[0] = DS1305_WRITE | DS1305_CONTROL;
 	buf[1] = ds1305->ctrl[0];
 
-	switch (cmd) {
-	case RTC_AIE_OFF:
-		status = 0;
-		if (!(buf[1] & DS1305_AEI0))
-			goto done;
-		buf[1] &= ~DS1305_AEI0;
-		break;
-
-	case RTC_AIE_ON:
-		status = 0;
+	if (enabled) {
 		if (ds1305->ctrl[0] & DS1305_AEI0)
 			goto done;
 		buf[1] |= DS1305_AEI0;
-		break;
+	} else {
+		if (!(buf[1] & DS1305_AEI0))
+			goto done;
+		buf[1] &= ~DS1305_AEI0;
 	}
-	if (status == 0) {
-		status = spi_write_then_read(ds1305->spi, buf, sizeof buf,
-				NULL, 0);
-		if (status >= 0)
-			ds1305->ctrl[0] = buf[1];
-	}
-
+	err = spi_write_then_read(ds1305->spi, buf, sizeof buf, NULL, 0);
+	if (err >= 0)
+		ds1305->ctrl[0] = buf[1];
 done:
-	return status;
+	return err;
+
 }
 
-#else
-#define ds1305_ioctl	NULL
-#endif
 
 /*
  * Get/set of date and time is pretty normal.
@@ -460,12 +443,12 @@
 #endif
 
 static const struct rtc_class_ops ds1305_ops = {
-	.ioctl		= ds1305_ioctl,
 	.read_time	= ds1305_get_time,
 	.set_time	= ds1305_set_time,
 	.read_alarm	= ds1305_get_alarm,
 	.set_alarm	= ds1305_set_alarm,
 	.proc		= ds1305_proc,
+	.alarm_irq_enable = ds1305_alarm_irq_enable,
 };
 
 static void ds1305_work(struct work_struct *work)
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 0d559b6..4724ba3 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -495,50 +495,27 @@
 	return 0;
 }
 
-static int ds1307_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct i2c_client	*client = to_i2c_client(dev);
 	struct ds1307		*ds1307 = i2c_get_clientdata(client);
 	int			ret;
 
-	switch (cmd) {
-	case RTC_AIE_OFF:
-		if (!test_bit(HAS_ALARM, &ds1307->flags))
-			return -ENOTTY;
+	if (!test_bit(HAS_ALARM, &ds1307->flags))
+		return -ENOTTY;
 
-		ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
-		if (ret < 0)
-			return ret;
+	ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
+	if (ret < 0)
+		return ret;
 
+	if (enabled)
+		ret |= DS1337_BIT_A1IE;
+	else
 		ret &= ~DS1337_BIT_A1IE;
 
-		ret = i2c_smbus_write_byte_data(client,
-						DS1337_REG_CONTROL, ret);
-		if (ret < 0)
-			return ret;
-
-		break;
-
-	case RTC_AIE_ON:
-		if (!test_bit(HAS_ALARM, &ds1307->flags))
-			return -ENOTTY;
-
-		ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
-		if (ret < 0)
-			return ret;
-
-		ret |= DS1337_BIT_A1IE;
-
-		ret = i2c_smbus_write_byte_data(client,
-						DS1337_REG_CONTROL, ret);
-		if (ret < 0)
-			return ret;
-
-		break;
-
-	default:
-		return -ENOIOCTLCMD;
-	}
+	ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, ret);
+	if (ret < 0)
+		return ret;
 
 	return 0;
 }
@@ -548,7 +525,7 @@
 	.set_time	= ds1307_set_time,
 	.read_alarm	= ds1337_read_alarm,
 	.set_alarm	= ds1337_set_alarm,
-	.ioctl		= ds1307_ioctl,
+	.alarm_irq_enable = ds1307_alarm_irq_enable,
 };
 
 /*----------------------------------------------------------------------*/
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 47fb635..d834a63 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -307,42 +307,25 @@
 	mutex_unlock(&ds1374->mutex);
 }
 
-static int ds1374_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int ds1374_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct ds1374 *ds1374 = i2c_get_clientdata(client);
-	int ret = -ENOIOCTLCMD;
+	int ret;
 
 	mutex_lock(&ds1374->mutex);
 
-	switch (cmd) {
-	case RTC_AIE_OFF:
-		ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
-		if (ret < 0)
-			goto out;
+	ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
+	if (ret < 0)
+		goto out;
 
-		ret &= ~DS1374_REG_CR_WACE;
-
-		ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
-		if (ret < 0)
-			goto out;
-
-		break;
-
-	case RTC_AIE_ON:
-		ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
-		if (ret < 0)
-			goto out;
-
+	if (enabled) {
 		ret |= DS1374_REG_CR_WACE | DS1374_REG_CR_AIE;
 		ret &= ~DS1374_REG_CR_WDALM;
-
-		ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
-		if (ret < 0)
-			goto out;
-
-		break;
+	} else {
+		ret &= ~DS1374_REG_CR_WACE;
 	}
+	ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
 
 out:
 	mutex_unlock(&ds1374->mutex);
@@ -354,7 +337,7 @@
 	.set_time = ds1374_set_time,
 	.read_alarm = ds1374_read_alarm,
 	.set_alarm = ds1374_set_alarm,
-	.ioctl = ds1374_ioctl,
+	.alarm_irq_enable = ds1374_alarm_irq_enable,
 };
 
 static int ds1374_probe(struct i2c_client *client,
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 5a8daa3..69fe664 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -213,41 +213,27 @@
 	return m41t80_set_datetime(to_i2c_client(dev), tm);
 }
 
-#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
-static int
-m41t80_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int m41t80_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	int rc;
 
-	switch (cmd) {
-	case RTC_AIE_OFF:
-	case RTC_AIE_ON:
-		break;
-	default:
-		return -ENOIOCTLCMD;
-	}
-
 	rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
 	if (rc < 0)
 		goto err;
-	switch (cmd) {
-	case RTC_AIE_OFF:
-		rc &= ~M41T80_ALMON_AFE;
-		break;
-	case RTC_AIE_ON:
+
+	if (enabled)
 		rc |= M41T80_ALMON_AFE;
-		break;
-	}
+	else
+		rc &= ~M41T80_ALMON_AFE;
+
 	if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0)
 		goto err;
+
 	return 0;
 err:
 	return -EIO;
 }
-#else
-#define	m41t80_rtc_ioctl NULL
-#endif
 
 static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t)
 {
@@ -374,7 +360,7 @@
 	.read_alarm = m41t80_rtc_read_alarm,
 	.set_alarm = m41t80_rtc_set_alarm,
 	.proc = m41t80_rtc_proc,
-	.ioctl = m41t80_rtc_ioctl,
+	.alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
 };
 
 #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index a99a0b5..3978f4c 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -263,30 +263,21 @@
 /*
  * Handle commands from user-space
  */
-static int m48t59_rtc_ioctl(struct device *dev, unsigned int cmd,
-			unsigned long arg)
+static int m48t59_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct m48t59_plat_data *pdata = pdev->dev.platform_data;
 	struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
 	unsigned long flags;
-	int ret = 0;
 
 	spin_lock_irqsave(&m48t59->lock, flags);
-	switch (cmd) {
-	case RTC_AIE_OFF:	/* alarm interrupt off */
-		M48T59_WRITE(0x00, M48T59_INTR);
-		break;
-	case RTC_AIE_ON:	/* alarm interrupt on */
+	if (enabled)
 		M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR);
-		break;
-	default:
-		ret = -ENOIOCTLCMD;
-		break;
-	}
+	else
+		M48T59_WRITE(0x00, M48T59_INTR);
 	spin_unlock_irqrestore(&m48t59->lock, flags);
 
-	return ret;
+	return 0;
 }
 
 static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq)
@@ -330,12 +321,12 @@
 }
 
 static const struct rtc_class_ops m48t59_rtc_ops = {
-	.ioctl		= m48t59_rtc_ioctl,
 	.read_time	= m48t59_rtc_read_time,
 	.set_time	= m48t59_rtc_set_time,
 	.read_alarm	= m48t59_rtc_readalarm,
 	.set_alarm	= m48t59_rtc_setalarm,
 	.proc		= m48t59_rtc_proc,
+	.alarm_irq_enable = m48t59_rtc_alarm_irq_enable,
 };
 
 static const struct rtc_class_ops m48t02_rtc_ops = {
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index bcd0cf6..1db62db 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -255,42 +255,21 @@
 	return 0;
 }
 
-#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
-
 /* Currently, the vRTC doesn't support UIE ON/OFF */
-static int
-mrst_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct mrst_rtc	*mrst = dev_get_drvdata(dev);
 	unsigned long	flags;
 
-	switch (cmd) {
-	case RTC_AIE_OFF:
-	case RTC_AIE_ON:
-		if (!mrst->irq)
-			return -EINVAL;
-		break;
-	default:
-		/* PIE ON/OFF is handled by mrst_irq_set_state() */
-		return -ENOIOCTLCMD;
-	}
-
 	spin_lock_irqsave(&rtc_lock, flags);
-	switch (cmd) {
-	case RTC_AIE_OFF:	/* alarm off */
-		mrst_irq_disable(mrst, RTC_AIE);
-		break;
-	case RTC_AIE_ON:	/* alarm on */
+	if (enabled)
 		mrst_irq_enable(mrst, RTC_AIE);
-		break;
-	}
+	else
+		mrst_irq_disable(mrst, RTC_AIE);
 	spin_unlock_irqrestore(&rtc_lock, flags);
 	return 0;
 }
 
-#else
-#define	mrst_rtc_ioctl	NULL
-#endif
 
 #if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
 
@@ -317,13 +296,13 @@
 #endif
 
 static const struct rtc_class_ops mrst_rtc_ops = {
-	.ioctl		= mrst_rtc_ioctl,
 	.read_time	= mrst_read_time,
 	.set_time	= mrst_set_time,
 	.read_alarm	= mrst_read_alarm,
 	.set_alarm	= mrst_set_alarm,
 	.proc		= mrst_procfs,
 	.irq_set_state	= mrst_irq_set_state,
+	.alarm_irq_enable = mrst_rtc_alarm_irq_enable,
 };
 
 static struct mrst_rtc	mrst_rtc;
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
index b2fff0c..6782062 100644
--- a/drivers/rtc/rtc-msm6242.c
+++ b/drivers/rtc/rtc-msm6242.c
@@ -82,7 +82,7 @@
 static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val,
 				unsigned int reg)
 {
-	return __raw_writel(val, &priv->regs[reg]);
+	__raw_writel(val, &priv->regs[reg]);
 }
 
 static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val,
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index bcca472..60627a7 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -169,25 +169,19 @@
 	return 0;
 }
 
-static int mv_rtc_ioctl(struct device *dev, unsigned int cmd,
-			unsigned long arg)
+static int mv_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
 	void __iomem *ioaddr = pdata->ioaddr;
 
 	if (pdata->irq < 0)
-		return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
-	switch (cmd) {
-	case RTC_AIE_OFF:
-		writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
-		break;
-	case RTC_AIE_ON:
+		return -EINVAL; /* fall back into rtc-dev's emulation */
+
+	if (enabled)
 		writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
-		break;
-	default:
-		return -ENOIOCTLCMD;
-	}
+	else
+		writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
 	return 0;
 }
 
@@ -216,7 +210,7 @@
 	.set_time	= mv_rtc_set_time,
 	.read_alarm	= mv_rtc_read_alarm,
 	.set_alarm	= mv_rtc_set_alarm,
-	.ioctl		= mv_rtc_ioctl,
+	.alarm_irq_enable = mv_rtc_alarm_irq_enable,
 };
 
 static int __devinit mv_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index e72b523..b4dbf3a 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -143,8 +143,6 @@
 	u8 reg;
 
 	switch (cmd) {
-	case RTC_AIE_OFF:
-	case RTC_AIE_ON:
 	case RTC_UIE_OFF:
 	case RTC_UIE_ON:
 		break;
@@ -156,13 +154,6 @@
 	rtc_wait_not_busy();
 	reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
 	switch (cmd) {
-	/* AIE = Alarm Interrupt Enable */
-	case RTC_AIE_OFF:
-		reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
-		break;
-	case RTC_AIE_ON:
-		reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
-		break;
 	/* UIE = Update Interrupt Enable (1/second) */
 	case RTC_UIE_OFF:
 		reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER;
@@ -182,6 +173,24 @@
 #define	omap_rtc_ioctl	NULL
 #endif
 
+static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	u8 reg;
+
+	local_irq_disable();
+	rtc_wait_not_busy();
+	reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
+	if (enabled)
+		reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
+	else
+		reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
+	rtc_wait_not_busy();
+	rtc_write(reg, OMAP_RTC_INTERRUPTS_REG);
+	local_irq_enable();
+
+	return 0;
+}
+
 /* this hardware doesn't support "don't care" alarm fields */
 static int tm2bcd(struct rtc_time *tm)
 {
@@ -309,6 +318,7 @@
 	.set_time	= omap_rtc_set_time,
 	.read_alarm	= omap_rtc_read_alarm,
 	.set_alarm	= omap_rtc_set_alarm,
+	.alarm_irq_enable = omap_rtc_alarm_irq_enable,
 };
 
 static int omap_rtc_alarm;
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c
index c086fc3..242bbf8 100644
--- a/drivers/rtc/rtc-proc.c
+++ b/drivers/rtc/rtc-proc.c
@@ -81,12 +81,16 @@
 
 static int rtc_proc_open(struct inode *inode, struct file *file)
 {
+	int ret;
 	struct rtc_device *rtc = PDE(inode)->data;
 
 	if (!try_module_get(THIS_MODULE))
 		return -ENODEV;
 
-	return single_open(file, rtc_proc_show, rtc);
+	ret = single_open(file, rtc_proc_show, rtc);
+	if (ret)
+		module_put(THIS_MODULE);
+	return ret;
 }
 
 static int rtc_proc_release(struct inode *inode, struct file *file)
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
index 36eb661..694da39 100644
--- a/drivers/rtc/rtc-rp5c01.c
+++ b/drivers/rtc/rtc-rp5c01.c
@@ -76,7 +76,7 @@
 static inline void rp5c01_write(struct rp5c01_priv *priv, unsigned int val,
 				unsigned int reg)
 {
-	return __raw_writel(val, &priv->regs[reg]);
+	__raw_writel(val, &priv->regs[reg]);
 }
 
 static void rp5c01_lock(struct rp5c01_priv *priv)
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index dd14e20..6aaa155 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -299,14 +299,6 @@
 		if (rs5c->type == rtc_rs5c372a
 				&& (buf & RS5C372A_CTRL1_SL1))
 			return -ENOIOCTLCMD;
+		break;
-	case RTC_AIE_OFF:
-	case RTC_AIE_ON:
-		/* these irq management calls only make sense for chips
-		 * which are wired up to an IRQ.
-		 */
-		if (!rs5c->has_irq)
-			return -ENOIOCTLCMD;
-		break;
 	default:
 		return -ENOIOCTLCMD;
 	}
@@ -317,12 +309,6 @@
 
 	addr = RS5C_ADDR(RS5C_REG_CTRL1);
 	switch (cmd) {
-	case RTC_AIE_OFF:	/* alarm off */
-		buf &= ~RS5C_CTRL1_AALE;
-		break;
-	case RTC_AIE_ON:	/* alarm on */
-		buf |= RS5C_CTRL1_AALE;
-		break;
 	case RTC_UIE_OFF:	/* update off */
 		buf &= ~RS5C_CTRL1_CT_MASK;
 		break;
@@ -347,6 +333,39 @@
 #endif
 
 
+static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	struct i2c_client	*client = to_i2c_client(dev);
+	struct rs5c372		*rs5c = i2c_get_clientdata(client);
+	unsigned char		buf;
+	int			status, addr;
+
+	if (!rs5c->has_irq)
+		return -EINVAL;
+
+	status = rs5c_get_regs(rs5c);
+	if (status < 0)
+		return status;
+
+	/* use the freshly read control register value */
+	buf = rs5c->regs[RS5C_REG_CTRL1];
+	addr = RS5C_ADDR(RS5C_REG_CTRL1);
+	if (enabled)
+		buf |= RS5C_CTRL1_AALE;
+	else
+		buf &= ~RS5C_CTRL1_AALE;
+
+	if (i2c_smbus_write_byte_data(client, addr, buf) < 0) {
+		printk(KERN_WARNING "%s: can't update alarm\n",
+			rs5c->rtc->name);
+		status = -EIO;
+	} else
+		rs5c->regs[RS5C_REG_CTRL1] = buf;
+
+	return status;
+}
+
+
 /* NOTE:  Since RTC_WKALM_{RD,SET} were originally defined for EFI,
  * which only exposes a polled programming interface; and since
  * these calls map directly to those EFI requests; we don't demand
@@ -466,6 +485,7 @@
 	.set_time	= rs5c372_rtc_set_time,
 	.read_alarm	= rs5c_read_alarm,
 	.set_alarm	= rs5c_set_alarm,
+	.alarm_irq_enable = rs5c_rtc_alarm_irq_enable,
 };
 
 #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 88ea52b..5dfe5ff 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -314,16 +314,6 @@
 		unsigned long arg)
 {
 	switch (cmd) {
-	case RTC_AIE_OFF:
-		spin_lock_irq(&sa1100_rtc_lock);
-		RTSR &= ~RTSR_ALE;
-		spin_unlock_irq(&sa1100_rtc_lock);
-		return 0;
-	case RTC_AIE_ON:
-		spin_lock_irq(&sa1100_rtc_lock);
-		RTSR |= RTSR_ALE;
-		spin_unlock_irq(&sa1100_rtc_lock);
-		return 0;
 	case RTC_UIE_OFF:
 		spin_lock_irq(&sa1100_rtc_lock);
 		RTSR &= ~RTSR_HZE;
@@ -338,6 +328,17 @@
 	return -ENOIOCTLCMD;
 }
 
+static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	spin_lock_irq(&sa1100_rtc_lock);
+	if (enabled)
+		RTSR |= RTSR_ALE;
+	else
+		RTSR &= ~RTSR_ALE;
+	spin_unlock_irq(&sa1100_rtc_lock);
+	return 0;
+}
+
 static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
 	rtc_time_to_tm(RCNR, tm);
@@ -410,6 +411,7 @@
 	.proc = sa1100_rtc_proc,
 	.irq_set_freq = sa1100_irq_set_freq,
 	.irq_set_state = sa1100_irq_set_state,
+	.alarm_irq_enable = sa1100_rtc_alarm_irq_enable,
 };
 
 static int sa1100_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 06e41ed..93314a9 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -350,10 +350,6 @@
 	unsigned int ret = 0;
 
 	switch (cmd) {
-	case RTC_AIE_OFF:
-	case RTC_AIE_ON:
-		sh_rtc_setaie(dev, cmd == RTC_AIE_ON);
-		break;
 	case RTC_UIE_OFF:
 		rtc->periodic_freq &= ~PF_OXS;
 		sh_rtc_setcie(dev, 0);
@@ -369,6 +365,12 @@
 	return ret;
 }
 
+static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	sh_rtc_setaie(dev, enabled);
+	return 0;
+}
+
 static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -604,6 +606,7 @@
 	.irq_set_state	= sh_rtc_irq_set_state,
 	.irq_set_freq	= sh_rtc_irq_set_freq,
 	.proc		= sh_rtc_proc,
+	.alarm_irq_enable = sh_rtc_alarm_irq_enable,
 };
 
 static int __init sh_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 51725f7..a82d6fe 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -50,24 +50,9 @@
 	return 0;
 }
 
-static int test_rtc_ioctl(struct device *dev, unsigned int cmd,
-	unsigned long arg)
+static int test_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
 {
-	/* We do support interrupts, they're generated
-	 * using the sysfs interface.
-	 */
-	switch (cmd) {
-	case RTC_PIE_ON:
-	case RTC_PIE_OFF:
-	case RTC_UIE_ON:
-	case RTC_UIE_OFF:
-	case RTC_AIE_ON:
-	case RTC_AIE_OFF:
-		return 0;
-
-	default:
-		return -ENOIOCTLCMD;
-	}
+	return 0;
 }
 
 static const struct rtc_class_ops test_rtc_ops = {
@@ -76,7 +61,7 @@
 	.read_alarm = test_rtc_read_alarm,
 	.set_alarm = test_rtc_set_alarm,
 	.set_mmss = test_rtc_set_mmss,
-	.ioctl = test_rtc_ioctl,
+	.alarm_irq_enable = test_rtc_alarm_irq_enable,
 };
 
 static ssize_t test_irq_show(struct device *dev,
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index c324424..769190a 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -240,26 +240,6 @@
 static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
 {
 	switch (cmd) {
-	case RTC_AIE_ON:
-		spin_lock_irq(&rtc_lock);
-
-		if (!alarm_enabled) {
-			enable_irq(aie_irq);
-			alarm_enabled = 1;
-		}
-
-		spin_unlock_irq(&rtc_lock);
-		break;
-	case RTC_AIE_OFF:
-		spin_lock_irq(&rtc_lock);
-
-		if (alarm_enabled) {
-			disable_irq(aie_irq);
-			alarm_enabled = 0;
-		}
-
-		spin_unlock_irq(&rtc_lock);
-		break;
 	case RTC_EPOCH_READ:
 		return put_user(epoch, (unsigned long __user *)arg);
 	case RTC_EPOCH_SET:
@@ -275,6 +255,24 @@
 	return 0;
 }
 
+static int vr41xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+	spin_lock_irq(&rtc_lock);
+	if (enabled) {
+		if (!alarm_enabled) {
+			enable_irq(aie_irq);
+			alarm_enabled = 1;
+		}
+	} else {
+		if (alarm_enabled) {
+			disable_irq(aie_irq);
+			alarm_enabled = 0;
+		}
+	}
+	spin_unlock_irq(&rtc_lock);
+	return 0;
+}
+
 static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id)
 {
 	struct platform_device *pdev = (struct platform_device *)dev_id;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 4155805..2b771f1 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -319,6 +319,9 @@
 
 	private = (struct dasd_eckd_private *) device->private;
 	lcu = private->lcu;
+	/* nothing to do if already disconnected */
+	if (!lcu)
+		return;
 	device->discipline->get_uid(device, &uid);
 	spin_lock_irqsave(&lcu->lock, flags);
 	list_del_init(&device->alias_list);
@@ -680,6 +683,9 @@
 
 	private = (struct dasd_eckd_private *) device->private;
 	lcu = private->lcu;
+	/* nothing to do if already removed */
+	if (!lcu)
+		return 0;
 	spin_lock_irqsave(&lcu->lock, flags);
 	_remove_device_from_lcu(lcu, device);
 	spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index e9fff2b..5640c89 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -476,7 +476,7 @@
 static int get_inbound_buffer_frontier(struct qdio_q *q)
 {
 	int count, stop;
-	unsigned char state;
+	unsigned char state = 0;
 
 	/*
 	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -643,7 +643,7 @@
 static int get_outbound_buffer_frontier(struct qdio_q *q)
 {
 	int count, stop;
-	unsigned char state;
+	unsigned char state = 0;
 
 	if (need_siga_sync(q))
 		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 65ebee0..b6a6356 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -565,7 +565,7 @@
 	struct iucv_event ev;
 	int rc;
 
-	if (memcmp(iucvMagic, ipuser, sizeof(ipuser)))
+	if (memcmp(iucvMagic, ipuser, 16))
 		/* ipuser must match iucvMagic. */
 		return -EINVAL;
 	rc = -EINVAL;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 29f848b..019ae58 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -988,16 +988,30 @@
 	chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
 	if (chp_dsc != NULL) {
 		/* CHPP field bit 6 == 1 -> single queue */
-		if ((chp_dsc->chpp & 0x02) == 0x02)
+		if ((chp_dsc->chpp & 0x02) == 0x02) {
+			if ((atomic_read(&card->qdio.state) !=
+				QETH_QDIO_UNINITIALIZED) &&
+			    (card->qdio.no_out_queues == 4))
+				/* change from 4 to 1 outbound queues */
+				qeth_free_qdio_buffers(card);
 			card->qdio.no_out_queues = 1;
+			if (card->qdio.default_out_queue != 0)
+				dev_info(&card->gdev->dev,
+					"Priority Queueing not supported\n");
+			card->qdio.default_out_queue = 0;
+		} else {
+			if ((atomic_read(&card->qdio.state) !=
+				QETH_QDIO_UNINITIALIZED) &&
+			    (card->qdio.no_out_queues == 1)) {
+				/* change from 1 to 4 outbound queues */
+				qeth_free_qdio_buffers(card);
+				card->qdio.default_out_queue = 2;
+			}
+			card->qdio.no_out_queues = 4;
+		}
 		card->info.func_level = 0x4100 + chp_dsc->desc;
 		kfree(chp_dsc);
 	}
-	if (card->qdio.no_out_queues == 1) {
-		card->qdio.default_out_queue = 0;
-		dev_info(&card->gdev->dev,
-			"Priority Queueing not supported\n");
-	}
 	QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
 	QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
 	return;
@@ -1832,33 +1846,6 @@
 	}
 }
 
-static inline int qeth_get_max_mtu_for_card(int cardtype)
-{
-	switch (cardtype) {
-
-	case QETH_CARD_TYPE_UNKNOWN:
-	case QETH_CARD_TYPE_OSD:
-	case QETH_CARD_TYPE_OSN:
-	case QETH_CARD_TYPE_OSM:
-	case QETH_CARD_TYPE_OSX:
-		return 61440;
-	case QETH_CARD_TYPE_IQD:
-		return 57344;
-	default:
-		return 1500;
-	}
-}
-
-static inline int qeth_get_mtu_out_of_mpc(int cardtype)
-{
-	switch (cardtype) {
-	case QETH_CARD_TYPE_IQD:
-		return 1;
-	default:
-		return 0;
-	}
-}
-
 static inline int qeth_get_mtu_outof_framesize(int framesize)
 {
 	switch (framesize) {
@@ -1881,10 +1868,9 @@
 	case QETH_CARD_TYPE_OSD:
 	case QETH_CARD_TYPE_OSM:
 	case QETH_CARD_TYPE_OSX:
-		return ((mtu >= 576) && (mtu <= 61440));
 	case QETH_CARD_TYPE_IQD:
 		return ((mtu >= 576) &&
-			(mtu <= card->info.max_mtu + 4096 - 32));
+			(mtu <= card->info.max_mtu));
 	case QETH_CARD_TYPE_OSN:
 	case QETH_CARD_TYPE_UNKNOWN:
 	default:
@@ -1907,7 +1893,7 @@
 	memcpy(&card->token.ulp_filter_r,
 	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
 	       QETH_MPC_TOKEN_LENGTH);
-	if (qeth_get_mtu_out_of_mpc(card->info.type)) {
+	if (card->info.type == QETH_CARD_TYPE_IQD) {
 		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
 		mtu = qeth_get_mtu_outof_framesize(framesize);
 		if (!mtu) {
@@ -1915,12 +1901,21 @@
 			QETH_DBF_TEXT_(SETUP, 2, "  rc%d", iob->rc);
 			return 0;
 		}
-		card->info.max_mtu = mtu;
+		if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) {
+			/* frame size has changed */
+			if (card->dev &&
+			    ((card->dev->mtu == card->info.initial_mtu) ||
+			     (card->dev->mtu > mtu)))
+				card->dev->mtu = mtu;
+			qeth_free_qdio_buffers(card);
+		}
 		card->info.initial_mtu = mtu;
+		card->info.max_mtu = mtu;
 		card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
 	} else {
 		card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
-		card->info.max_mtu = qeth_get_max_mtu_for_card(card->info.type);
+		card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
+			iob->data);
 		card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
 	}
 
@@ -3775,6 +3770,47 @@
 	}
 }
 
+static void qeth_determine_capabilities(struct qeth_card *card)
+{
+	int rc;
+	int length;
+	char *prcd;
+	struct ccw_device *ddev;
+	int ddev_offline = 0;
+
+	QETH_DBF_TEXT(SETUP, 2, "detcapab");
+	ddev = CARD_DDEV(card);
+	if (!ddev->online) {
+		ddev_offline = 1;
+		rc = ccw_device_set_online(ddev);
+		if (rc) {
+			QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+			goto out;
+		}
+	}
+
+	rc = qeth_read_conf_data(card, (void **) &prcd, &length);
+	if (rc) {
+		QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
+			dev_name(&card->gdev->dev), rc);
+		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+		goto out_offline;
+	}
+	qeth_configure_unitaddr(card, prcd);
+	qeth_configure_blkt_default(card, prcd);
+	kfree(prcd);
+
+	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
+	if (rc)
+		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+
+out_offline:
+	if (ddev_offline == 1)
+		ccw_device_set_offline(ddev);
+out:
+	return;
+}
+
 static int qeth_qdio_establish(struct qeth_card *card)
 {
 	struct qdio_initialize init_data;
@@ -3905,6 +3941,7 @@
 
 	QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
 	atomic_set(&card->force_alloc_skb, 0);
+	qeth_get_channel_path_desc(card);
 retry:
 	if (retries)
 		QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
@@ -3933,6 +3970,7 @@
 		else
 			goto retry;
 	}
+	qeth_determine_capabilities(card);
 	qeth_init_tokens(card);
 	qeth_init_func_level(card);
 	rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
@@ -4202,41 +4240,6 @@
 	card->discipline.ccwgdriver = NULL;
 }
 
-static void qeth_determine_capabilities(struct qeth_card *card)
-{
-	int rc;
-	int length;
-	char *prcd;
-
-	QETH_DBF_TEXT(SETUP, 2, "detcapab");
-	rc = ccw_device_set_online(CARD_DDEV(card));
-	if (rc) {
-		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
-		goto out;
-	}
-
-
-	rc = qeth_read_conf_data(card, (void **) &prcd, &length);
-	if (rc) {
-		QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
-			dev_name(&card->gdev->dev), rc);
-		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
-		goto out_offline;
-	}
-	qeth_configure_unitaddr(card, prcd);
-	qeth_configure_blkt_default(card, prcd);
-	kfree(prcd);
-
-	rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
-	if (rc)
-		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
-
-out_offline:
-	ccw_device_set_offline(CARD_DDEV(card));
-out:
-	return;
-}
-
 static int qeth_core_probe_device(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 7a7a1b6..ada0fe7 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -573,13 +573,13 @@
 		case IPA_RC_L2_DUP_LAYER3_MAC:
 			dev_warn(&card->gdev->dev,
 				"MAC address %pM already exists\n",
-				card->dev->dev_addr);
+				cmd->data.setdelmac.mac);
 			break;
 		case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
 		case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
 			dev_warn(&card->gdev->dev,
 				"MAC address %pM is not authorized\n",
-				card->dev->dev_addr);
+				cmd->data.setdelmac.mac);
 			break;
 		default:
 			break;
@@ -831,12 +831,14 @@
 	return NETDEV_TX_OK;
 }
 
-static int qeth_l2_open(struct net_device *dev)
+static int __qeth_l2_open(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
 	int rc = 0;
 
 	QETH_CARD_TEXT(card, 4, "qethopen");
+	if (card->state == CARD_STATE_UP)
+		return rc;
 	if (card->state != CARD_STATE_SOFTSETUP)
 		return -ENODEV;
 
@@ -857,6 +859,18 @@
 	return rc;
 }
 
+static int qeth_l2_open(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 5, "qethope_");
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "openREC");
+		return -ERESTARTSYS;
+	}
+	return __qeth_l2_open(dev);
+}
+
 static int qeth_l2_stop(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
@@ -1046,7 +1060,7 @@
 	if (recover_flag == CARD_STATE_RECOVER) {
 		if (recovery_mode &&
 		    card->info.type != QETH_CARD_TYPE_OSN) {
-			qeth_l2_open(card->dev);
+			__qeth_l2_open(card->dev);
 		} else {
 			rtnl_lock();
 			dev_open(card->dev);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e227e46..d09b0c4 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2998,7 +2998,9 @@
 	 */
 	if (iph->protocol == IPPROTO_UDP)
 		hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP;
-	hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
+	hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ |
+		QETH_HDR_EXT_CSUM_HDR_REQ;
+	iph->check = 0;
 	if (card->options.performance_stats)
 		card->perf_stats.tx_csum++;
 }
@@ -3240,12 +3242,14 @@
 	return NETDEV_TX_OK;
 }
 
-static int qeth_l3_open(struct net_device *dev)
+static int __qeth_l3_open(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
 	int rc = 0;
 
 	QETH_CARD_TEXT(card, 4, "qethopen");
+	if (card->state == CARD_STATE_UP)
+		return rc;
 	if (card->state != CARD_STATE_SOFTSETUP)
 		return -ENODEV;
 	card->data.state = CH_STATE_UP;
@@ -3260,6 +3264,18 @@
 	return rc;
 }
 
+static int qeth_l3_open(struct net_device *dev)
+{
+	struct qeth_card *card = dev->ml_priv;
+
+	QETH_CARD_TEXT(card, 5, "qethope_");
+	if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
+		QETH_CARD_TEXT(card, 3, "openREC");
+		return -ERESTARTSYS;
+	}
+	return __qeth_l3_open(dev);
+}
+
 static int qeth_l3_stop(struct net_device *dev)
 {
 	struct qeth_card *card = dev->ml_priv;
@@ -3564,7 +3580,7 @@
 		netif_carrier_off(card->dev);
 	if (recover_flag == CARD_STATE_RECOVER) {
 		if (recovery_mode)
-			qeth_l3_open(card->dev);
+			__qeth_l3_open(card->dev);
 		else {
 			rtnl_lock();
 			dev_open(card->dev);
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index 65e1cf1..207b7d7 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -60,7 +60,7 @@
 static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8],
 			     u8 ipuser[16])
 {
-	if (strncmp(ipvmid, "*MSG    ", sizeof(ipvmid)) != 0)
+	if (strncmp(ipvmid, "*MSG    ", 8) != 0)
 		return -EINVAL;
 	/* Path pending from *MSG. */
 	return iucv_path_accept(path, &smsg_handler, "SMSGIUCV        ", NULL);
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 475c31a..77b26f5 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -2,7 +2,7 @@
 *******************************************************************************
 **        O.S   : Linux
 **   FILE NAME  : arcmsr.h
-**        BY    : Erich Chen
+**        BY    : Nick Cheng
 **   Description: SCSI RAID Device Driver for
 **                ARECA RAID Host adapter
 *******************************************************************************
@@ -46,8 +46,12 @@
 struct device_attribute;
 /*The limit of outstanding scsi command that firmware can handle*/
 #define ARCMSR_MAX_OUTSTANDING_CMD						256
-#define ARCMSR_MAX_FREECCB_NUM							320
-#define ARCMSR_DRIVER_VERSION		     "Driver Version 1.20.00.15 2010/02/02"
+#ifdef CONFIG_XEN
+	#define ARCMSR_MAX_FREECCB_NUM	160
+#else
+	#define ARCMSR_MAX_FREECCB_NUM	320
+#endif
+#define ARCMSR_DRIVER_VERSION		     "Driver Version 1.20.00.15 2010/08/05"
 #define ARCMSR_SCSI_INITIATOR_ID						255
 #define ARCMSR_MAX_XFER_SECTORS							512
 #define ARCMSR_MAX_XFER_SECTORS_B						4096
@@ -60,7 +64,6 @@
 #define ARCMSR_MAX_HBB_POSTQUEUE						264
 #define ARCMSR_MAX_XFER_LEN							0x26000 /* 152K */
 #define ARCMSR_CDB_SG_PAGE_LENGTH						256 
-#define SCSI_CMD_ARECA_SPECIFIC						0xE1
 #ifndef PCI_DEVICE_ID_ARECA_1880
 #define PCI_DEVICE_ID_ARECA_1880 0x1880
  #endif
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index a4e04c5..acdae33 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -2,7 +2,7 @@
 *******************************************************************************
 **        O.S   : Linux
 **   FILE NAME  : arcmsr_attr.c
-**        BY    : Erich Chen
+**        BY    : Nick Cheng
 **   Description: attributes exported to sysfs and device host
 *******************************************************************************
 ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 1cadcd6..984bd52 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2,7 +2,7 @@
 *******************************************************************************
 **        O.S   : Linux
 **   FILE NAME  : arcmsr_hba.c
-**        BY    : Erich Chen
+**        BY    : Nick Cheng
 **   Description: SCSI RAID Device Driver for
 **                ARECA RAID Host adapter
 *******************************************************************************
@@ -76,7 +76,7 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
 static int sleeptime = 10;
-static int retrycount = 30;
+static int retrycount = 12;
 wait_queue_head_t wait_q;
 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
 					struct scsi_cmnd *cmd);
@@ -187,7 +187,6 @@
 		if (isleep > 0) {
 			msleep(isleep*1000);
 		}
-		printk(KERN_NOTICE "wake-up\n");
 		return 0;
 }
 
@@ -921,7 +920,6 @@
 }
 
 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
-
 {
 	int id, lun;
 	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
@@ -948,7 +946,7 @@
 				, pCCB->startdone
 				, atomic_read(&acb->ccboutstandingcount));
 		  return;
-		}
+	}
 	arcmsr_report_ccb_state(acb, pCCB, error);
 }
 
@@ -981,7 +979,7 @@
 	case ACB_ADAPTER_TYPE_B: {
 		struct MessageUnit_B *reg = acb->pmuB;
 		/*clear all outbound posted Q*/
-		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, &reg->iop2drv_doorbell); /* clear doorbell interrupt */
+		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
 		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
 			if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
 				writel(0, &reg->done_qbuffer[i]);
@@ -1511,7 +1509,6 @@
 		arcmsr_drain_donequeue(acb, pCCB, error);
 	}
 }
-
 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
 {
 	uint32_t index;
@@ -2106,10 +2103,6 @@
 	if (atomic_read(&acb->ccboutstandingcount) >=
 			ARCMSR_MAX_OUTSTANDING_CMD)
 		return SCSI_MLQUEUE_HOST_BUSY;
-	if ((scsicmd == SCSI_CMD_ARECA_SPECIFIC)) {
-		printk(KERN_NOTICE "Receiveing SCSI_CMD_ARECA_SPECIFIC command..\n");
-		return 0;
-	}
 	ccb = arcmsr_get_freeccb(acb);
 	if (!ccb)
 		return SCSI_MLQUEUE_HOST_BUSY;
@@ -2393,6 +2386,7 @@
 	int index, rtn;
 	bool error;
 	polling_hbb_ccb_retry:
+
 	poll_count++;
 	/* clear doorbell interrupt */
 	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
@@ -2663,6 +2657,7 @@
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
+		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 		return;
 	} else {
 		acb->fw_flag = FW_NORMAL;
@@ -2670,8 +2665,10 @@
 			atomic_set(&acb->rq_map_token, 16);
 		}
 		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
-		if (atomic_dec_and_test(&acb->rq_map_token))
+		if (atomic_dec_and_test(&acb->rq_map_token)) {
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			return;
+		}
 		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 	}
@@ -2682,15 +2679,18 @@
 {
 	struct MessageUnit_B __iomem *reg = acb->pmuB;
 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
+		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 		return;
 	} else {
 		acb->fw_flag = FW_NORMAL;
 		if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
-			atomic_set(&acb->rq_map_token,16);
+			atomic_set(&acb->rq_map_token, 16);
 		}
 		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
-		if(atomic_dec_and_test(&acb->rq_map_token))
+		if (atomic_dec_and_test(&acb->rq_map_token)) {
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			return;
+		}
 		writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 	}
@@ -2701,6 +2701,7 @@
 {
 	struct MessageUnit_C __iomem *reg = acb->pmuC;
 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
+		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 		return;
 	} else {
 		acb->fw_flag = FW_NORMAL;
@@ -2708,8 +2709,10 @@
 			atomic_set(&acb->rq_map_token, 16);
 		}
 		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
-		if (atomic_dec_and_test(&acb->rq_map_token))
+		if (atomic_dec_and_test(&acb->rq_map_token)) {
+			mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 			return;
+		}
 		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
 		writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
@@ -2897,6 +2900,8 @@
 	uint32_t intmask_org;
 	uint8_t rtnval = 0x00;
 	int i = 0;
+	unsigned long flags;
+
 	if (atomic_read(&acb->ccboutstandingcount) != 0) {
 		/* disable all outbound interrupt */
 		intmask_org = arcmsr_disable_outbound_ints(acb);
@@ -2907,7 +2912,12 @@
 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
 			ccb = acb->pccb_pool[i];
 			if (ccb->startdone == ARCMSR_CCB_START) {
-				arcmsr_ccb_complete(ccb);
+				scsi_dma_unmap(ccb->pcmd);
+				ccb->startdone = ARCMSR_CCB_DONE;
+				ccb->ccb_flags = 0;
+				spin_lock_irqsave(&acb->ccblist_lock, flags);
+				list_add_tail(&ccb->list, &acb->ccb_free_list);
+				spin_unlock_irqrestore(&acb->ccblist_lock, flags);
 			}
 		}
 		atomic_set(&acb->ccboutstandingcount, 0);
@@ -2920,8 +2930,7 @@
 
 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
 {
-	struct AdapterControlBlock *acb =
-		(struct AdapterControlBlock *)cmd->device->host->hostdata;
+	struct AdapterControlBlock *acb;
 	uint32_t intmask_org, outbound_doorbell;
 	int retry_count = 0;
 	int rtn = FAILED;
@@ -2971,31 +2980,16 @@
 				atomic_set(&acb->rq_map_token, 16);
 				atomic_set(&acb->ante_token_value, 16);
 				acb->fw_flag = FW_NORMAL;
-				init_timer(&acb->eternal_timer);
-				acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-				acb->eternal_timer.data = (unsigned long) acb;
-				acb->eternal_timer.function = &arcmsr_request_device_map;
-				add_timer(&acb->eternal_timer);
+				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 				acb->acb_flags &= ~ACB_F_BUS_RESET;
 				rtn = SUCCESS;
 				printk(KERN_ERR "arcmsr: scsi  bus reset eh returns with success\n");
 			} else {
 				acb->acb_flags &= ~ACB_F_BUS_RESET;
-				if (atomic_read(&acb->rq_map_token) == 0) {
-					atomic_set(&acb->rq_map_token, 16);
-					atomic_set(&acb->ante_token_value, 16);
-					acb->fw_flag = FW_NORMAL;
-					init_timer(&acb->eternal_timer);
-						acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-					acb->eternal_timer.data = (unsigned long) acb;
-					acb->eternal_timer.function = &arcmsr_request_device_map;
-					add_timer(&acb->eternal_timer);
-				} else {
-					atomic_set(&acb->rq_map_token, 16);
-					atomic_set(&acb->ante_token_value, 16);
-					acb->fw_flag = FW_NORMAL;
-					mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
-				}
+				atomic_set(&acb->rq_map_token, 16);
+				atomic_set(&acb->ante_token_value, 16);
+				acb->fw_flag = FW_NORMAL;
+				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
 				rtn = SUCCESS;
 			}
 			break;
@@ -3007,21 +3001,10 @@
 				rtn = FAILED;
 			} else {
 				acb->acb_flags &= ~ACB_F_BUS_RESET;
-				if (atomic_read(&acb->rq_map_token) == 0) {
-					atomic_set(&acb->rq_map_token, 16);
-					atomic_set(&acb->ante_token_value, 16);
-					acb->fw_flag = FW_NORMAL;
-					init_timer(&acb->eternal_timer);
-						acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-					acb->eternal_timer.data = (unsigned long) acb;
-					acb->eternal_timer.function = &arcmsr_request_device_map;
-					add_timer(&acb->eternal_timer);
-				} else {
-					atomic_set(&acb->rq_map_token, 16);
-					atomic_set(&acb->ante_token_value, 16);
-					acb->fw_flag = FW_NORMAL;
-					mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
-				}
+				atomic_set(&acb->rq_map_token, 16);
+				atomic_set(&acb->ante_token_value, 16);
+				acb->fw_flag = FW_NORMAL;
+				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 				rtn = SUCCESS;
 			}
 			break;
@@ -3067,31 +3050,16 @@
 				atomic_set(&acb->rq_map_token, 16);
 				atomic_set(&acb->ante_token_value, 16);
 				acb->fw_flag = FW_NORMAL;
-				init_timer(&acb->eternal_timer);
-				acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
-				acb->eternal_timer.data = (unsigned long) acb;
-				acb->eternal_timer.function = &arcmsr_request_device_map;
-				add_timer(&acb->eternal_timer);
+				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 				acb->acb_flags &= ~ACB_F_BUS_RESET;
 				rtn = SUCCESS;
 				printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
 			} else {
 				acb->acb_flags &= ~ACB_F_BUS_RESET;
-				if (atomic_read(&acb->rq_map_token) == 0) {
-					atomic_set(&acb->rq_map_token, 16);
-					atomic_set(&acb->ante_token_value, 16);
-					acb->fw_flag = FW_NORMAL;
-					init_timer(&acb->eternal_timer);
-						acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
-					acb->eternal_timer.data = (unsigned long) acb;
-					acb->eternal_timer.function = &arcmsr_request_device_map;
-					add_timer(&acb->eternal_timer);
-				} else {
-					atomic_set(&acb->rq_map_token, 16);
-					atomic_set(&acb->ante_token_value, 16);
-					acb->fw_flag = FW_NORMAL;
-					mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
-				}
+				atomic_set(&acb->rq_map_token, 16);
+				atomic_set(&acb->ante_token_value, 16);
+				acb->fw_flag = FW_NORMAL;
+				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
 				rtn = SUCCESS;
 			}
 			break;
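
Note on the arcmsr timer hunks above: the eternal_timer is now set up once and every later path, including the early returns in the request_device_map helpers, only rearms it with mod_timer(), so the periodic device-map poll can never silently stop. A minimal sketch of that self-rearming shape (hypothetical names; the 6-second interval mirrors the driver's intent, and the callback signature matches the pre-timer_setup() API used in this tree):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct poll_ctx {
		struct timer_list timer;	/* armed once at probe time */
		int busy;			/* reset/abort in progress */
	};

	static void poll_device_map(unsigned long data)
	{
		struct poll_ctx *ctx = (struct poll_ctx *)data;

		if (ctx->busy) {
			/* rearm even when skipping work, or polling stops for good */
			mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(6000));
			return;
		}
		/* ... post the GET_CONFIG request to the adapter here ... */
		mod_timer(&ctx->timer, jiffies + msecs_to_jiffies(6000));
	}
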
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 5815cbe..9a7aaf5 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -646,6 +646,7 @@
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
+	shost->host_eh_scheduled = 0;
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
 	SAS_DPRINTK("Enter %s\n", __func__);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index b2a8170..9ead03998 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -2176,9 +2176,9 @@
 		/* adjust hba_queue_depth, reply_free_queue_depth,
 		 * and queue_size
 		 */
-		ioc->hba_queue_depth -= queue_diff;
-		ioc->reply_free_queue_depth -= queue_diff;
-		queue_size -= queue_diff;
+		ioc->hba_queue_depth -= (queue_diff / 2);
+		ioc->reply_free_queue_depth -= (queue_diff / 2);
+		queue_size = facts->MaxReplyDescriptorPostQueueDepth;
 	}
 	ioc->reply_post_queue_depth = queue_size;
 
@@ -3941,6 +3941,8 @@
 static void
 _base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
 {
+	mpt2sas_scsih_reset_handler(ioc, reset_phase);
+	mpt2sas_ctl_reset_handler(ioc, reset_phase);
 	switch (reset_phase) {
 	case MPT2_IOC_PRE_RESET:
 		dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: "
@@ -3971,8 +3973,6 @@
 		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
 		break;
 	}
-	mpt2sas_scsih_reset_handler(ioc, reset_phase);
-	mpt2sas_ctl_reset_handler(ioc, reset_phase);
 }
 
 /**
@@ -4026,6 +4026,7 @@
 {
 	int r;
 	unsigned long flags;
+	u8 pe_complete = ioc->wait_for_port_enable_to_complete;
 
 	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
 	    __func__));
@@ -4068,6 +4069,14 @@
 	if (r)
 		goto out;
 	_base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
+
+	/* If this hard reset is called while port enable is active, then
+	 * there is no reason to call make_ioc_operational
+	 */
+	if (pe_complete) {
+		r = -EFAULT;
+		goto out;
+	}
 	r = _base_make_ioc_operational(ioc, sleep_flag);
 	if (!r)
 		_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index eda347c..5ded3db 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -819,7 +819,7 @@
 }
 
 /**
- * mptscsih_get_scsi_lookup - returns scmd entry
+ * _scsih_scsi_lookup_get - returns scmd entry
  * @ioc: per adapter object
  * @smid: system request message index
  *
@@ -832,6 +832,28 @@
 }
 
 /**
+ * _scsih_scsi_lookup_get_clear - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the smid stored scmd pointer.
+ * Then clears (sets to NULL) the stored scmd pointer.
+ */
+static inline struct scsi_cmnd *
+_scsih_scsi_lookup_get_clear(struct MPT2SAS_ADAPTER *ioc, u16 smid)
+{
+	unsigned long flags;
+	struct scsi_cmnd *scmd;
+
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	scmd = ioc->scsi_lookup[smid - 1].scmd;
+	ioc->scsi_lookup[smid - 1].scmd = NULL;
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+	return scmd;
+}
+
+/**
  * _scsih_scsi_lookup_find_by_scmd - scmd lookup
  * @ioc: per adapter object
  * @smid: system request message index
@@ -2981,9 +3003,6 @@
 	u16 handle;
 
 	for (i = 0 ; i < event_data->NumEntries; i++) {
-		if (event_data->PHY[i].PhyStatus &
-		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
-			continue;
 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
 		if (!handle)
 			continue;
@@ -3210,7 +3229,7 @@
 	u16 count = 0;
 
 	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
-		scmd = _scsih_scsi_lookup_get(ioc, smid);
+		scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
 		if (!scmd)
 			continue;
 		count++;
@@ -3804,7 +3823,7 @@
 	u32 response_code = 0;
 
 	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
-	scmd = _scsih_scsi_lookup_get(ioc, smid);
+	scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
 	if (scmd == NULL)
 		return 1;
 
@@ -5005,6 +5024,12 @@
 		     event_data);
 #endif
 
+	/* In MPI Revision K (0xC), the internal device reset complete event was
+	 * implemented, so avoid setting the tm_busy flag for older firmware.
+	 */
+	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
+		return;
+
 	if (event_data->ReasonCode !=
 	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
 	   event_data->ReasonCode !=
@@ -5099,6 +5124,7 @@
     struct fw_event_work *fw_event)
 {
 	struct scsi_cmnd *scmd;
+	struct scsi_device *sdev;
 	u16 smid, handle;
 	u32 lun;
 	struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -5109,12 +5135,17 @@
 	Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
 #endif
 	u16 ioc_status;
+	unsigned long flags;
+	int r;
+
 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "broadcast primative: "
 	    "phy number(%d), width(%d)\n", ioc->name, event_data->PhyNum,
 	    event_data->PortWidth));
 	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter\n", ioc->name,
 	    __func__));
 
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	ioc->broadcast_aen_busy = 0;
 	termination_count = 0;
 	query_count = 0;
 	mpi_reply = ioc->tm_cmds.reply;
@@ -5122,7 +5153,8 @@
 		scmd = _scsih_scsi_lookup_get(ioc, smid);
 		if (!scmd)
 			continue;
-		sas_device_priv_data = scmd->device->hostdata;
+		sdev = scmd->device;
+		sas_device_priv_data = sdev->hostdata;
 		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
 			continue;
 		 /* skip hidden raid components */
@@ -5138,6 +5170,7 @@
 		lun = sas_device_priv_data->lun;
 		query_count++;
 
+		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 		mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
 		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, NULL);
 		ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
@@ -5147,14 +5180,20 @@
 		    (mpi_reply->ResponseCode ==
 		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
 		     mpi_reply->ResponseCode ==
-		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
+		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) {
+			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 			continue;
-
-		mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
-		    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, 0, 30, NULL);
+		}
+		r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
+		    sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+		    scmd);
+		if (r == FAILED)
+			sdev_printk(KERN_WARNING, sdev, "task abort: FAILED "
+			    "scmd(%p)\n", scmd);
 		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
+		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
 	}
-	ioc->broadcast_aen_busy = 0;
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 
 	dtmprintk(ioc, printk(MPT2SAS_INFO_FMT
 	    "%s - exit, query_count = %d termination_count = %d\n",
@@ -6626,6 +6665,7 @@
 		destroy_workqueue(wq);
 
 	/* release all the volumes */
+	_scsih_ir_shutdown(ioc);
 	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
 	    list) {
 		if (raid_device->starget) {
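
Two locking details in the mpt2sas_scsih.c hunks above are worth spelling out. First, _scsih_scsi_lookup_get_clear() fetches the smid-indexed scmd and NULLs the slot inside one scsi_lookup_lock critical section, so the completion path and the flush/abort paths cannot both claim the same command. Second, the broadcast-primitive handler now drops that spinlock around mpt2sas_scsih_issue_tm(), which can sleep, and re-takes it before continuing the walk. A hedged sketch of the fetch-and-clear half, using a made-up table type rather than the driver's actual structures:

	#include <linux/spinlock.h>

	struct cmd_table {
		spinlock_t lock;
		struct scsi_cmnd *slot[64];	/* smid - 1 -> scmd, NULL when free */
	};

	static struct scsi_cmnd *table_get_clear(struct cmd_table *t, u16 smid)
	{
		struct scsi_cmnd *scmd;
		unsigned long flags;

		spin_lock_irqsave(&t->lock, flags);
		scmd = t->slot[smid - 1];	/* read the entry ... */
		t->slot[smid - 1] = NULL;	/* ... and clear it in the same critical section */
		spin_unlock_irqrestore(&t->lock, flags);

		return scmd;
	}
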
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 44578b5..d3e58d7 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1561,6 +1561,7 @@
 {
 	struct Scsi_Host *host = rport_to_shost(rport);
 	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+	unsigned long flags;
 
 	if (!fcport)
 		return;
@@ -1573,10 +1574,10 @@
 	 * Transport has effectively 'deleted' the rport, clear
 	 * all local references.
 	 */
-	spin_lock_irq(host->host_lock);
+	spin_lock_irqsave(host->host_lock, flags);
 	fcport->rport = fcport->drport = NULL;
 	*((fc_port_t **)rport->dd_data) = NULL;
-	spin_unlock_irq(host->host_lock);
+	spin_unlock_irqrestore(host->host_lock, flags);
 
 	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
 		return;
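
This qla2xxx hunk, and the matching ones in qla_init.c and qla_os.c below, all make the same change: spin_lock_irq()/spin_unlock_irq() on host_lock becomes the irqsave/irqrestore pair, so the unlock restores whatever interrupt state the caller had instead of unconditionally re-enabling interrupts in a context that may already hold them disabled. A minimal sketch of the difference (hypothetical lock and field):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(host_lock);

	struct port { void *rport; };

	static void clear_rport(struct port *p)
	{
		unsigned long flags;

		/*
		 * spin_lock_irq()/spin_unlock_irq() would blindly re-enable
		 * interrupts on unlock; irqsave/irqrestore put back whatever
		 * state the caller arrived with.
		 */
		spin_lock_irqsave(&host_lock, flags);
		p->rport = NULL;
		spin_unlock_irqrestore(&host_lock, flags);
	}
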
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f948e1a..d9479c3 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2505,11 +2505,12 @@
 {
 	fc_port_t *fcport = data;
 	struct fc_rport *rport;
+	unsigned long flags;
 
-	spin_lock_irq(fcport->vha->host->host_lock);
+	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
 	rport = fcport->drport ? fcport->drport: fcport->rport;
 	fcport->drport = NULL;
-	spin_unlock_irq(fcport->vha->host->host_lock);
+	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
 	if (rport)
 		fc_remote_port_delete(rport);
 }
@@ -2879,6 +2880,7 @@
 	struct fc_rport_identifiers rport_ids;
 	struct fc_rport *rport;
 	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
 
 	qla2x00_rport_del(fcport);
 
@@ -2893,9 +2895,9 @@
 		    "Unable to allocate fc remote port!\n");
 		return;
 	}
-	spin_lock_irq(fcport->vha->host->host_lock);
+	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
 	*((fc_port_t **)rport->dd_data) = fcport;
-	spin_unlock_irq(fcport->vha->host->host_lock);
+	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
 
 	rport->supported_classes = fcport->supported_classes;
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c194c23..f27724d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -562,7 +562,6 @@
 	}
 	if (atomic_read(&fcport->state) != FCS_ONLINE) {
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
-			atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
 			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 			cmd->result = DID_NO_CONNECT << 16;
 			goto qc24_fail_command;
@@ -2513,6 +2512,7 @@
 {
 	struct fc_rport *rport;
 	scsi_qla_host_t *base_vha;
+	unsigned long flags;
 
 	if (!fcport->rport)
 		return;
@@ -2520,9 +2520,9 @@
 	rport = fcport->rport;
 	if (defer) {
 		base_vha = pci_get_drvdata(vha->hw->pdev);
-		spin_lock_irq(vha->host->host_lock);
+		spin_lock_irqsave(vha->host->host_lock, flags);
 		fcport->drport = rport;
-		spin_unlock_irq(vha->host->host_lock);
+		spin_unlock_irqrestore(vha->host->host_lock, flags);
 		set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
 		qla2xxx_wake_dpc(base_vha);
 	} else
@@ -3282,10 +3282,10 @@
 
 	set_user_nice(current, -20);
 
+	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
 		DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
 
-		set_current_state(TASK_INTERRUPTIBLE);
 		schedule();
 		__set_current_state(TASK_RUNNING);
 
@@ -3454,7 +3454,9 @@
 		qla2x00_do_dpc_all_vps(base_vha);
 
 		ha->dpc_active = 0;
+		set_current_state(TASK_INTERRUPTIBLE);
 	} /* End of while(1) */
+	__set_current_state(TASK_RUNNING);
 
 	DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
 
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 7b31093..a6b2d72 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1671,7 +1671,7 @@
 			    unsigned long long lba, unsigned int num, int write)
 {
 	int ret;
-	unsigned int block, rest = 0;
+	unsigned long long block, rest = 0;
 	int (*func)(struct scsi_cmnd *, unsigned char *, int);
 
 	func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
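
The scsi_debug change above widens block and rest to unsigned long long because both are derived from the 64-bit lba argument; with unsigned int, any LBA at or above 2^32 would be truncated before the wrap-around arithmetic on the fake store. A small, runnable illustration of the truncation (plain C, made-up value):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long lba = 0x100000123ULL;	/* needs more than 32 bits */
		unsigned int narrow = lba;			/* silently truncated */
		unsigned long long wide = lba;

		printf("narrow=0x%x wide=0x%llx\n", narrow, wide);
		/* prints: narrow=0x123 wide=0x100000123 */
		return 0;
	}
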
diff --git a/drivers/sh/intc/chip.c b/drivers/sh/intc/chip.c
index de885a0..f33e2dd 100644
--- a/drivers/sh/intc/chip.c
+++ b/drivers/sh/intc/chip.c
@@ -173,7 +173,8 @@
 	return 0;
 }
 
-#define VALID(x) (x | 0x80)
+#define SENSE_VALID_FLAG 0x80
+#define VALID(x) ((x) | SENSE_VALID_FLAG)
 
 static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
 	[IRQ_TYPE_EDGE_FALLING] = VALID(0),
@@ -201,7 +202,8 @@
 	ihp = intc_find_irq(d->sense, d->nr_sense, irq);
 	if (ihp) {
 		addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
-		intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
+		intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle,
+						    value & ~SENSE_VALID_FLAG);
 	}
 
 	return 0;
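
In the sh intc change above, VALID() exists only to mark a sense-table entry as populated: bit 7 distinguishes a real mapping whose hardware value happens to be 0 from an empty slot. The second hunk strips that marker with "& ~SENSE_VALID_FLAG" before the value is written to the register, which is the actual fix. A short sketch of the lookup-then-strip idea (hypothetical table and types):

	#include <linux/errno.h>

	#define SENSE_VALID_FLAG	0x80
	#define VALID(x)		((x) | SENSE_VALID_FLAG)

	static const unsigned char sense_table[4] = {
		[0] = VALID(0),		/* falling edge -> hardware value 0 */
		[1] = VALID(1),		/* rising edge  -> hardware value 1 */
	};

	static int lookup_sense(unsigned int type, unsigned char *hw_value)
	{
		unsigned char v = sense_table[type & 3];

		if (!(v & SENSE_VALID_FLAG))
			return -EINVAL;			/* no mapping for this type */

		*hw_value = v & ~SENSE_VALID_FLAG;	/* strip the marker bit */
		return 0;
	}
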
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c
index 351d8a3..19752b0 100644
--- a/drivers/spi/pxa2xx_spi_pci.c
+++ b/drivers/spi/pxa2xx_spi_pci.c
@@ -7,10 +7,9 @@
 #include <linux/of_device.h>
 #include <linux/spi/pxa2xx_spi.h>
 
-struct awesome_struct {
+struct ce4100_info {
 	struct ssp_device ssp;
-	struct platform_device spi_pdev;
-	struct pxa2xx_spi_master spi_pdata;
+	struct platform_device *spi_pdev;
 };
 
 static DEFINE_MUTEX(ssp_lock);
@@ -51,23 +50,15 @@
 }
 EXPORT_SYMBOL_GPL(pxa_ssp_free);
 
-static void plat_dev_release(struct device *dev)
-{
-	struct awesome_struct *as = container_of(dev,
-			struct awesome_struct, spi_pdev.dev);
-
-	of_device_node_put(&as->spi_pdev.dev);
-}
-
 static int __devinit ce4100_spi_probe(struct pci_dev *dev,
 		const struct pci_device_id *ent)
 {
 	int ret;
 	resource_size_t phys_beg;
 	resource_size_t phys_len;
-	struct awesome_struct *spi_info;
+	struct ce4100_info *spi_info;
 	struct platform_device *pdev;
-	struct pxa2xx_spi_master *spi_pdata;
+	struct pxa2xx_spi_master spi_pdata;
 	struct ssp_device *ssp;
 
 	ret = pci_enable_device(dev);
@@ -84,33 +75,30 @@
 		return ret;
 	}
 
+	pdev = platform_device_alloc("pxa2xx-spi", dev->devfn);
 	spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL);
-	if (!spi_info) {
+	if (!pdev || !spi_info) {
 		ret = -ENOMEM;
-		goto err_kz;
+		goto err_nomem;
 	}
-	ssp = &spi_info->ssp;
-	pdev = &spi_info->spi_pdev;
-	spi_pdata =  &spi_info->spi_pdata;
+	memset(&spi_pdata, 0, sizeof(spi_pdata));
+	spi_pdata.num_chipselect = dev->devfn;
 
-	pdev->name = "pxa2xx-spi";
-	pdev->id = dev->devfn;
+	ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata));
+	if (ret)
+		goto err_nomem;
+
 	pdev->dev.parent = &dev->dev;
-	pdev->dev.platform_data = &spi_info->spi_pdata;
-
 #ifdef CONFIG_OF
 	pdev->dev.of_node = dev->dev.of_node;
 #endif
-	pdev->dev.release = plat_dev_release;
-
-	spi_pdata->num_chipselect = dev->devfn;
-
+	ssp = &spi_info->ssp;
 	ssp->phys_base = pci_resource_start(dev, 0);
 	ssp->mmio_base = ioremap(phys_beg, phys_len);
 	if (!ssp->mmio_base) {
 		dev_err(&pdev->dev, "failed to ioremap() registers\n");
 		ret = -EIO;
-		goto err_remap;
+		goto err_nomem;
 	}
 	ssp->irq = dev->irq;
 	ssp->port_id = pdev->id;
@@ -122,7 +110,7 @@
 
 	pci_set_drvdata(dev, spi_info);
 
-	ret = platform_device_register(pdev);
+	ret = platform_device_add(pdev);
 	if (ret)
 		goto err_dev_add;
 
@@ -135,27 +123,21 @@
 	mutex_unlock(&ssp_lock);
 	iounmap(ssp->mmio_base);
 
-err_remap:
-	kfree(spi_info);
-
-err_kz:
+err_nomem:
 	release_mem_region(phys_beg, phys_len);
-
+	platform_device_put(pdev);
+	kfree(spi_info);
 	return ret;
 }
 
 static void __devexit ce4100_spi_remove(struct pci_dev *dev)
 {
-	struct awesome_struct *spi_info;
-	struct platform_device *pdev;
+	struct ce4100_info *spi_info;
 	struct ssp_device *ssp;
 
 	spi_info = pci_get_drvdata(dev);
-
 	ssp = &spi_info->ssp;
-	pdev = &spi_info->spi_pdev;
-
-	platform_device_unregister(pdev);
+	platform_device_unregister(spi_info->spi_pdev);
 
 	iounmap(ssp->mmio_base);
 	release_mem_region(pci_resource_start(dev, 0),
@@ -171,7 +153,6 @@
 }
 
 static struct pci_device_id ce4100_spi_devices[] __devinitdata = {
-
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
 	{ },
 };
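
The pxa2xx_spi_pci rework above drops the embedded struct platform_device (which needed a fake release() callback) in favour of the allocate/add API: platform_device_alloc() returns a refcounted device, platform_device_add_data() copies the platform data so the on-stack original can go away, platform_device_add() registers the device, and platform_device_put() is the single error-path cleanup. A minimal sketch of that sequence (hypothetical device name and payload):

	#include <linux/platform_device.h>
	#include <linux/err.h>

	struct my_pdata { int num_chipselect; };

	static struct platform_device *register_child(struct device *parent, int id)
	{
		struct my_pdata pdata = { .num_chipselect = id };
		struct platform_device *pdev;
		int ret;

		pdev = platform_device_alloc("my-child", id);
		if (!pdev)
			return ERR_PTR(-ENOMEM);

		ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
		if (ret)
			goto err_put;

		pdev->dev.parent = parent;

		ret = platform_device_add(pdev);
		if (ret)
			goto err_put;

		return pdev;

	err_put:
		platform_device_put(pdev);	/* drops the device and its copied data */
		return ERR_PTR(ret);
	}
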
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c
index 56f60c8..2c665fcea 100644
--- a/drivers/spi/spi_sh_msiof.c
+++ b/drivers/spi/spi_sh_msiof.c
@@ -509,9 +509,11 @@
 	bytes_done = 0;
 
 	while (bytes_done < t->len) {
+		void *rx_buf = t->rx_buf ? t->rx_buf + bytes_done : NULL;
+		const void *tx_buf = t->tx_buf ? t->tx_buf + bytes_done : NULL;
 		n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo,
-					   t->tx_buf + bytes_done,
-					   t->rx_buf + bytes_done,
+					   tx_buf,
+					   rx_buf,
 					   words, bits);
 		if (n < 0)
 			break;
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 2d8cc45..42cdaa9 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -82,7 +82,7 @@
 
 config SSB_SILENT
 	bool "No SSB kernel messages"
-	depends on SSB && EMBEDDED
+	depends on SSB && EXPERT
 	help
 	  This option turns off all Sonics Silicon Backplane printks.
 	  Note that you won't be able to identify problems, once
diff --git a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
index 0e298db..29b8ab4 100644
--- a/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
+++ b/drivers/staging/ath6kl/miscdrv/ar3kps/ar3kpsconfig.c
@@ -360,8 +360,8 @@
         	status = 1;
         	goto complete;
     	}
-        len = (firmware->size > MAX_BDADDR_FORMAT_LENGTH)? MAX_BDADDR_FORMAT_LENGTH: firmware->size;
-	memcpy(config_bdaddr, firmware->data,len);
+	len = min(firmware->size, MAX_BDADDR_FORMAT_LENGTH - 1);
+	memcpy(config_bdaddr, firmware->data, len);
 	config_bdaddr[len] = '\0';
 	write_bdaddr(hdev,config_bdaddr,BDADDR_TYPE_STRING);
        	A_RELEASE_FIRMWARE(firmware);
diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.c b/drivers/staging/brcm80211/sys/wl_mac80211.c
index bdd629d..cd8392b 100644
--- a/drivers/staging/brcm80211/sys/wl_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wl_mac80211.c
@@ -209,11 +209,8 @@
 	struct wl_info *wl = hw->priv;
 	ASSERT(wl);
 	WL_LOCK(wl);
-	wl_down(wl);
 	ieee80211_stop_queues(hw);
 	WL_UNLOCK(wl);
-
-	return;
 }
 
 static int
@@ -246,7 +243,14 @@
 static void
 wl_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 {
-	return;
+	struct wl_info *wl;
+
+	wl = HW_TO_WL(hw);
+
+	/* put driver in down state */
+	WL_LOCK(wl);
+	wl_down(wl);
+	WL_UNLOCK(wl);
 }
 
 static int
@@ -259,9 +263,7 @@
 	switch (type) {
 	case NL80211_CHAN_HT20:
 	case NL80211_CHAN_NO_HT:
-		WL_LOCK(wl);
 		err = wlc_set(wl->wlc, WLC_SET_CHANNEL, chan->hw_value);
-		WL_UNLOCK(wl);
 		break;
 	case NL80211_CHAN_HT40MINUS:
 	case NL80211_CHAN_HT40PLUS:
@@ -281,6 +283,7 @@
 	int err = 0;
 	int new_int;
 
+	WL_LOCK(wl);
 	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
 		WL_NONE("%s: Setting listen interval to %d\n",
 			__func__, conf->listen_interval);
@@ -337,6 +340,7 @@
 	}
 
  config_out:
+	WL_UNLOCK(wl);
 	return err;
 }
 
@@ -455,13 +459,21 @@
 
 static void wl_ops_sw_scan_start(struct ieee80211_hw *hw)
 {
+	struct wl_info *wl = hw->priv;
 	WL_NONE("Scan Start\n");
+	WL_LOCK(wl);
+	wlc_scan_start(wl->wlc);
+	WL_UNLOCK(wl);
 	return;
 }
 
 static void wl_ops_sw_scan_complete(struct ieee80211_hw *hw)
 {
+	struct wl_info *wl = hw->priv;
 	WL_NONE("Scan Complete\n");
+	WL_LOCK(wl);
+	wlc_scan_stop(wl->wlc);
+	WL_UNLOCK(wl);
 	return;
 }
 
@@ -779,7 +791,7 @@
 	wl_found++;
 	return wl;
 
- fail:
+fail:
 	wl_free(wl);
 fail1:
 	return NULL;
@@ -1090,7 +1102,6 @@
 	return 0;
 }
 
-#ifdef LINUXSTA_PS
 static int wl_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct wl_info *wl;
@@ -1105,11 +1116,12 @@
 		return -ENODEV;
 	}
 
+	/* only need to flag that the hw is down for a proper resume */
 	WL_LOCK(wl);
-	wl_down(wl);
 	wl->pub->hw_up = false;
 	WL_UNLOCK(wl);
-	pci_save_state(pdev, wl->pci_psstate);
+
+	pci_save_state(pdev);
 	pci_disable_device(pdev);
 	return pci_set_power_state(pdev, PCI_D3hot);
 }
@@ -1133,7 +1145,7 @@
 	if (err)
 		return err;
 
-	pci_restore_state(pdev, wl->pci_psstate);
+	pci_restore_state(pdev);
 
 	err = pci_enable_device(pdev);
 	if (err)
@@ -1145,13 +1157,12 @@
 	if ((val & 0x0000ff00) != 0)
 		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 
-	WL_LOCK(wl);
-	err = wl_up(wl);
-	WL_UNLOCK(wl);
-
+	/*
+	 * done: the driver will be brought back up in the
+	 * wl_ops_add_interface() call.
+	 */
 	return err;
 }
-#endif				/* LINUXSTA_PS */
 
 static void wl_remove(struct pci_dev *pdev)
 {
@@ -1184,14 +1195,12 @@
 }
 
 static struct pci_driver wl_pci_driver = {
- .name  = "brcm80211",
- .probe = wl_pci_probe,
-#ifdef LINUXSTA_PS
- .suspend = wl_suspend,
- .resume  = wl_resume,
-#endif				/* LINUXSTA_PS */
- .remove   = __devexit_p(wl_remove),
- .id_table = wl_id_table,
+	.name = "brcm80211",
+	.probe = wl_pci_probe,
+	.suspend = wl_suspend,
+	.resume = wl_resume,
+	.remove = __devexit_p(wl_remove),
+	.id_table = wl_id_table,
 };
 
 /**
diff --git a/drivers/staging/brcm80211/sys/wlc_mac80211.c b/drivers/staging/brcm80211/sys/wlc_mac80211.c
index 1d5d01a..e37e805 100644
--- a/drivers/staging/brcm80211/sys/wlc_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wlc_mac80211.c
@@ -5126,7 +5126,6 @@
 	fifo = prio2fifo[prio];
 
 	ASSERT((uint) skb_headroom(sdu) >= TXOFF);
-	ASSERT(!(sdu->cloned));
 	ASSERT(!(sdu->next));
 	ASSERT(!(sdu->prev));
 	ASSERT(fifo < NFIFO);
@@ -8462,3 +8461,16 @@
 
 	kfree(qi);
 }
+
+/*
+ * Flag 'scan in progress' to withhold dynamic phy calibration
+ */
+void wlc_scan_start(struct wlc_info *wlc)
+{
+	wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, true);
+}
+
+void wlc_scan_stop(struct wlc_info *wlc)
+{
+	wlc_phy_hold_upd(wlc->band->pi, PHY_HOLD_FOR_SCAN, false);
+}
diff --git a/drivers/staging/brcm80211/sys/wlc_pub.h b/drivers/staging/brcm80211/sys/wlc_pub.h
index 146a690..aff4130 100644
--- a/drivers/staging/brcm80211/sys/wlc_pub.h
+++ b/drivers/staging/brcm80211/sys/wlc_pub.h
@@ -570,6 +570,8 @@
 extern u16 wlc_rate_shm_offset(struct wlc_info *wlc, u8 rate);
 extern u32 wlc_get_rspec_history(struct wlc_bsscfg *cfg);
 extern u32 wlc_get_current_highest_rate(struct wlc_bsscfg *cfg);
+extern void wlc_scan_start(struct wlc_info *wlc);
+extern void wlc_scan_stop(struct wlc_info *wlc);
 
 static inline int wlc_iovar_getuint(struct wlc_info *wlc, const char *name,
 				    uint *arg)
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index aad4732..1502d80 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -439,6 +439,7 @@
 config COMEDI_NI_ATMIO
 	tristate "NI AT-MIO E series ISA-PNP card support"
 	depends on ISAPNP && COMEDI_NI_TIO && COMEDI_NI_COMMON
+	select COMEDI_8255
 	default N
 	---help---
 	  Enable support for National Instruments AT-MIO E series cards
@@ -1040,6 +1041,8 @@
 config COMEDI_NI_PCIMIO
 	tristate "NI PCI-MIO-E series and M series support"
 	depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
+	select COMEDI_8255
+	select COMEDI_FC
 	default N
 	---help---
 	  Enable support for National Instruments PCI-MIO-E series and M series
@@ -1164,6 +1167,7 @@
 config COMEDI_NI_MIO_CS
 	tristate "NI DAQCard E series PCMCIA support"
 	depends on COMEDI_NI_TIO && COMEDI_NI_COMMON
+	select COMEDI_8255
 	select COMEDI_FC
 	default N
 	---help---
@@ -1268,7 +1272,6 @@
 config COMEDI_NI_TIO
 	tristate "NI general purpose counter support"
 	depends on COMEDI_MITE
-	select COMEDI_8255
 	default N
 	---help---
 	  Enable support for National Instruments general purpose counters.
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index cd25b24..fd274e9 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -61,8 +61,6 @@
 #define PCI_DAQ_SIZE		4096
 #define PCI_DAQ_SIZE_660X       8192
 
-MODULE_LICENSE("GPL");
-
 struct mite_struct *mite_devices;
 EXPORT_SYMBOL(mite_devices);
 
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 14e716e..54741c9 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -527,3 +527,7 @@
 
 module_init(driver_ni6527_init_module);
 module_exit(driver_ni6527_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 8b8e2aa..403fc09 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -871,3 +871,7 @@
 
 module_init(driver_ni_65xx_init_module);
 module_exit(driver_ni_65xx_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 6612b08..ca2aeaa 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -1421,3 +1421,7 @@
 	};
 	return 0;
 }
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index e9f034e..d8d91f9 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -384,3 +384,7 @@
 	mite_list_devices();
 	return -EIO;
 }
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 4d1868d..0728c3c 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -575,7 +575,8 @@
 	/* grab our IRQ */
 	if (irq) {
 		isr_flags = 0;
-		if (thisboard->bustype == pci_bustype)
+		if (thisboard->bustype == pci_bustype
+		    || thisboard->bustype == pcmcia_bustype)
 			isr_flags |= IRQF_SHARED;
 		if (request_irq(irq, labpc_interrupt, isr_flags,
 				driver_labpc.driver_name, dev)) {
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 84a15c3..005d2fe 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -1354,3 +1354,7 @@
 
 module_init(driver_pcidio_init_module);
 module_exit(driver_pcidio_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 23a3812..9148abd 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1853,3 +1853,7 @@
 
 	return 0;
 }
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
index b3d05fc..4fb8094 100644
--- a/drivers/staging/hv/blkvsc_drv.c
+++ b/drivers/staging/hv/blkvsc_drv.c
@@ -368,6 +368,7 @@
 		blkdev->gd->first_minor = 0;
 	blkdev->gd->fops = &block_ops;
 	blkdev->gd->private_data = blkdev;
+	blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
 	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
 
 	blkvsc_do_inquiry(blkdev);
diff --git a/drivers/staging/hv/netvsc.c b/drivers/staging/hv/netvsc.c
index df9cd13..0edbe74 100644
--- a/drivers/staging/hv/netvsc.c
+++ b/drivers/staging/hv/netvsc.c
@@ -1279,7 +1279,7 @@
 	/* ASSERT(device); */
 
 	packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
-			 GFP_KERNEL);
+			 GFP_ATOMIC);
 	if (!packet)
 		return;
 	buffer = packet;
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 0147b40..b41c964 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -236,6 +236,7 @@
 	if (status == 1) {
 		netif_carrier_on(net);
 		netif_wake_queue(net);
+		netif_notify_peers(net);
 	} else {
 		netif_carrier_off(net);
 		netif_stop_queue(net);
@@ -358,7 +359,6 @@
 
 	/* Set initial state */
 	netif_carrier_off(net);
-	netif_stop_queue(net);
 
 	net_device_ctx = netdev_priv(net);
 	net_device_ctx->device_ctx = device_ctx;
diff --git a/drivers/staging/iio/adc/ad7476_core.c b/drivers/staging/iio/adc/ad7476_core.c
index deb68c8..b8b54da 100644
--- a/drivers/staging/iio/adc/ad7476_core.c
+++ b/drivers/staging/iio/adc/ad7476_core.c
@@ -68,7 +68,7 @@
 	/* Corresponds to Vref / 2^(bits) */
 	unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
 
-	return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
+	return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
 }
 static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad7476_show_scale, NULL, 0);
 
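
The "%d.%03d" change here, and the identical ones in ad7887_core.c, ad799x_core.c and ad5446.c below, zero-pad the sub-millivolt part so the fraction keeps its place value when printed. A runnable illustration of why "%d.%d" loses a digit (plain C, made-up scale value):

	#include <stdio.h>

	int main(void)
	{
		int scale_uv = 1050;	/* i.e. 1.050 mV per LSB */

		printf("%d.%d\n",   scale_uv / 1000, scale_uv % 1000);	/* "1.50"  - wrong */
		printf("%d.%03d\n", scale_uv / 1000, scale_uv % 1000);	/* "1.050" - right */
		return 0;
	}
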
diff --git a/drivers/staging/iio/adc/ad7887_core.c b/drivers/staging/iio/adc/ad7887_core.c
index 6859089..5d85efa 100644
--- a/drivers/staging/iio/adc/ad7887_core.c
+++ b/drivers/staging/iio/adc/ad7887_core.c
@@ -68,7 +68,7 @@
 	/* Corresponds to Vref / 2^(bits) */
 	unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
 
-	return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
+	return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
 }
 static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad7887_show_scale, NULL, 0);
 
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 6309d52..89ccf37 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -432,7 +432,7 @@
 	/* Corresponds to Vref / 2^(bits) */
 	unsigned int scale_uv = (st->int_vref_mv * 1000) >> st->chip_info->bits;
 
-	return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
+	return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
 }
 
 static IIO_DEVICE_ATTR(in_scale, S_IRUGO, ad799x_show_scale, NULL, 0);
diff --git a/drivers/staging/iio/dac/ad5446.c b/drivers/staging/iio/dac/ad5446.c
index e3387cd..0f87eca 100644
--- a/drivers/staging/iio/dac/ad5446.c
+++ b/drivers/staging/iio/dac/ad5446.c
@@ -87,7 +87,7 @@
 	/* Corresponds to Vref / 2^(bits) */
 	unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
 
-	return sprintf(buf, "%d.%d\n", scale_uv / 1000, scale_uv % 1000);
+	return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
 }
 static IIO_DEVICE_ATTR(out_scale, S_IRUGO, ad5446_show_scale, NULL, 0);
 
diff --git a/drivers/staging/intel_sst/intelmid_v2_control.c b/drivers/staging/intel_sst/intelmid_v2_control.c
index e38e89d..e2f6d6a 100644
--- a/drivers/staging/intel_sst/intelmid_v2_control.c
+++ b/drivers/staging/intel_sst/intelmid_v2_control.c
@@ -874,7 +874,10 @@
 		sc_access[3].reg_addr = 0x109;
 		sc_access[3].mask = MASK6;
 		sc_access[3].value = 0x00;
-		num_val = 4;
+		sc_access[4].reg_addr = 0x104;
+		sc_access[4].value = 0x3C;
+		sc_access[4].mask = 0xff;
+		num_val = 5;
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/staging/lirc/TODO.lirc_zilog b/drivers/staging/lirc/TODO.lirc_zilog
index 6aa312d..2d0263f 100644
--- a/drivers/staging/lirc/TODO.lirc_zilog
+++ b/drivers/staging/lirc/TODO.lirc_zilog
@@ -1,13 +1,37 @@
-The binding between hdpvr and lirc_zilog is currently disabled,
+1. Both ir-kbd-i2c and lirc_zilog provide support for RX events.
+The 'tx_only' lirc_zilog module parameter will allow ir-kbd-i2c
+and lirc_zilog to coexist in the kernel, if the user requires such a set-up.
+However, the IR unit will not work well without coordination between the
+two modules.  A shared mutex, for transceiver access locking, needs to be
+supplied by bridge drivers, in struct IR_i2c_init_data, to both ir-kbd-i2c
+and lirc_zilog, before they will coexist usefully.  This should be fixed
+before moving out of staging.
+
+2. References and locking need careful examination.  For cx18 and ivtv PCI
+cards, which are not easily "hot unplugged", the imperfect state of reference
+counting and locking is acceptable if not correct.  For USB connected units
+like HD PVR, PVR USB2, HVR-1900, and HVR-1950, the likelihood of an Oops on
+unplug is probably high.  Proper reference counting and locking needs to be
+implemented before this module is moved out of staging.
+
+3. The binding between hdpvr and lirc_zilog is currently disabled,
 due to an OOPS reported a few years ago when both the hdpvr and cx18
 drivers were loaded in his system. More details can be seen at:
 	http://www.mail-archive.com/linux-media@vger.kernel.org/msg09163.html
 More tests need to be done, in order to fix the reported issue.
 
-There's a conflict between ir-kbd-i2c: Both provide support for RX events.
-Such conflict needs to be fixed, before moving it out of staging.
+4. In addition to providing a shared mutex for transceiver access
+locking, bridge drivers, if able, should provide a chip reset() callback
+to lirc_zilog via struct IR_i2c_init_data.  cx18 and ivtv already have routines
+to perform Z8 chip resets via GPIO manipulations.  This will allow lirc_zilog
+to bring the chip back to normal when it hangs, in the same places the
+original lirc_pvr150 driver code does.  This is not strictly needed, so it
+is not required to move lirc_zilog out of staging.
 
-The way I2C probe works, it will try to register the driver twice, one
-for RX and another for TX. The logic needs to be fixed to avoid such
-issue.
+5. Both lirc_zilog and ir-kbd-i2c support the Zilog Z8 for IR, as programmed
+and installed on Hauppauge products.  When working on either module, developers
+must consider at least the following bridge drivers which mention an IR Rx unit
+at address 0x71 (indicative of a Z8):
+
+	ivtv cx18 hdpvr pvrusb2 bt8xx cx88 saa7134
 
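
Item 1 above asks bridge drivers to hand both ir-kbd-i2c and lirc_zilog a shared transceiver lock through the i2c init data. No such field exists in this tree yet, so the following is only a hedged sketch of what the hand-off could look like; the structure and helper names are assumptions, not the current struct IR_i2c_init_data layout:

	#include <linux/mutex.h>

	/* Hypothetical extension of the bridge -> IR-driver init data. */
	struct ir_init_data_ext {
		/* ... existing fields ... */
		struct mutex *zilog_lock;	/* shared Z8 transceiver lock */
	};

	/* Bridge driver side: one lock per Z8, handed to both IR modules. */
	static DEFINE_MUTEX(z8_lock);

	static void fill_init_data(struct ir_init_data_ext *init_data)
	{
		init_data->zilog_lock = &z8_lock;
	}

	/* IR module side: hold the shared lock around any Z8 i2c transaction. */
	static void z8_transaction(struct ir_init_data_ext *init_data)
	{
		mutex_lock(init_data->zilog_lock);
		/* ... i2c_master_send()/i2c_master_recv() to the Z8 here ... */
		mutex_unlock(init_data->zilog_lock);
	}
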
diff --git a/drivers/staging/lirc/lirc_imon.c b/drivers/staging/lirc/lirc_imon.c
index 0da6b95..235cab0 100644
--- a/drivers/staging/lirc/lirc_imon.c
+++ b/drivers/staging/lirc/lirc_imon.c
@@ -447,6 +447,7 @@
 
 exit:
 	mutex_unlock(&context->ctx_lock);
+	kfree(data_buf);
 
 	return (!retval) ? n_bytes : retval;
 }
diff --git a/drivers/staging/lirc/lirc_it87.c b/drivers/staging/lirc/lirc_it87.c
index 929ae57..5938616 100644
--- a/drivers/staging/lirc/lirc_it87.c
+++ b/drivers/staging/lirc/lirc_it87.c
@@ -232,6 +232,7 @@
 		i++;
 	}
 	terminate_send(tx_buf[i - 1]);
+	kfree(tx_buf);
 	return n;
 }
 
diff --git a/drivers/staging/lirc/lirc_parallel.c b/drivers/staging/lirc/lirc_parallel.c
index dfd2c44..3a9c098 100644
--- a/drivers/staging/lirc/lirc_parallel.c
+++ b/drivers/staging/lirc/lirc_parallel.c
@@ -376,6 +376,7 @@
 	unsigned long flags;
 	int counttimer;
 	int *wbuf;
+	ssize_t ret;
 
 	if (!is_claimed)
 		return -EBUSY;
@@ -393,8 +394,10 @@
 	if (timer == 0) {
 		/* try again if device is ready */
 		timer = init_lirc_timer();
-		if (timer == 0)
-			return -EIO;
+		if (timer == 0) {
+			ret = -EIO;
+			goto out;
+		}
 	}
 
 	/* adjust values from usecs */
@@ -420,7 +423,8 @@
 			if (check_pselecd && (in(1) & LP_PSELECD)) {
 				lirc_off();
 				local_irq_restore(flags);
-				return -EIO;
+				ret = -EIO;
+				goto out;
 			}
 		} while (counttimer < wbuf[i]);
 		i++;
@@ -436,7 +440,8 @@
 			level = newlevel;
 			if (check_pselecd && (in(1) & LP_PSELECD)) {
 				local_irq_restore(flags);
-				return -EIO;
+				ret = -EIO;
+				goto out;
 			}
 		} while (counttimer < wbuf[i]);
 		i++;
@@ -445,7 +450,11 @@
 #else
 	/* place code that handles write without external timer here */
 #endif
-	return n;
+	ret = n;
+out:
+	kfree(wbuf);
+
+	return ret;
 }
 
 static unsigned int lirc_poll(struct file *file, poll_table *wait)
diff --git a/drivers/staging/lirc/lirc_sasem.c b/drivers/staging/lirc/lirc_sasem.c
index 998485e..925eabe 100644
--- a/drivers/staging/lirc/lirc_sasem.c
+++ b/drivers/staging/lirc/lirc_sasem.c
@@ -448,6 +448,7 @@
 exit:
 
 	mutex_unlock(&context->ctx_lock);
+	kfree(data_buf);
 
 	return (!retval) ? n_bytes : retval;
 }
diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/lirc/lirc_serial.c
index 9bcf149..1c3099b 100644
--- a/drivers/staging/lirc/lirc_serial.c
+++ b/drivers/staging/lirc/lirc_serial.c
@@ -966,7 +966,7 @@
 	if (n % sizeof(int) || count % 2 == 0)
 		return -EINVAL;
 	wbuf = memdup_user(buf, n);
-	if (PTR_ERR(wbuf))
+	if (IS_ERR(wbuf))
 		return PTR_ERR(wbuf);
 	spin_lock_irqsave(&hardware[type].lock, flags);
 	if (type == LIRC_IRDEO) {
@@ -981,6 +981,7 @@
 	}
 	off();
 	spin_unlock_irqrestore(&hardware[type].lock, flags);
+	kfree(wbuf);
 	return n;
 }
 
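
The lirc_serial fix above corrects the check after memdup_user(): PTR_ERR() on a valid pointer is just the pointer cast to long, so "if (PTR_ERR(wbuf))" rejected every successful copy, while IS_ERR() only matches the small negative-errno range. The kfree() additions in the surrounding lirc drivers close the matching leak on the success path. The usual shape of this pattern, as a sketch with illustrative names:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/uaccess.h>

	static ssize_t tx_write(const char __user *buf, size_t n)
	{
		int *wbuf;

		wbuf = memdup_user(buf, n);
		if (IS_ERR(wbuf))	/* not PTR_ERR(): that is nonzero for valid pointers too */
			return PTR_ERR(wbuf);

		/* ... transmit the n/sizeof(int) pulse/space values in wbuf ... */

		kfree(wbuf);		/* memdup_user() buffers must always be freed */
		return n;
	}
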
diff --git a/drivers/staging/lirc/lirc_sir.c b/drivers/staging/lirc/lirc_sir.c
index c553ab6..76be7b8 100644
--- a/drivers/staging/lirc/lirc_sir.c
+++ b/drivers/staging/lirc/lirc_sir.c
@@ -330,6 +330,7 @@
 	/* enable receiver */
 	Ser2UTCR3 = UTCR3_RXE|UTCR3_RIE;
 #endif
+	kfree(tx_buf);
 	return count;
 }
 
diff --git a/drivers/staging/lirc/lirc_zilog.c b/drivers/staging/lirc/lirc_zilog.c
index ad29bb1..0aad0d7 100644
--- a/drivers/staging/lirc/lirc_zilog.c
+++ b/drivers/staging/lirc/lirc_zilog.c
@@ -20,6 +20,9 @@
  *
  * parts are cut&pasted from the lirc_i2c.c driver
  *
+ * Numerous changes updating lirc_zilog.c in kernel 2.6.38 and later are
+ * Copyright (C) 2011 Andy Walls <awalls@md.metrocast.net>
+ *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
  *  the Free Software Foundation; either version 2 of the License, or
@@ -60,38 +63,44 @@
 #include <media/lirc_dev.h>
 #include <media/lirc.h>
 
-struct IR {
-	struct lirc_driver l;
-
-	/* Device info */
-	struct mutex ir_lock;
-	int open;
-	bool is_hdpvr;
-
+struct IR_rx {
 	/* RX device */
-	struct i2c_client c_rx;
-	int have_rx;
+	struct i2c_client *c;
 
 	/* RX device buffer & lock */
 	struct lirc_buffer buf;
 	struct mutex buf_lock;
 
 	/* RX polling thread data */
-	struct completion *t_notify;
-	struct completion *t_notify2;
-	int shutdown;
 	struct task_struct *task;
 
 	/* RX read data */
 	unsigned char b[3];
+	bool hdpvr_data_fmt;
+};
 
+struct IR_tx {
 	/* TX device */
-	struct i2c_client c_tx;
+	struct i2c_client *c;
+
+	/* TX additional actions needed */
 	int need_boot;
-	int have_tx;
+	bool post_tx_ready_poll;
+};
+
+struct IR {
+	struct lirc_driver l;
+
+	struct mutex ir_lock;
+	int open;
+
+	struct i2c_adapter *adapter;
+	struct IR_rx *rx;
+	struct IR_tx *tx;
 };
 
 /* Minor -> data mapping */
+static struct mutex ir_devices_lock;
 static struct IR *ir_devices[MAX_IRCTL_DEVICES];
 
 /* Block size for IR transmitter */
@@ -124,14 +133,11 @@
 #define zilog_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, \
 					## args)
 #define zilog_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
-
-#define ZILOG_HAUPPAUGE_IR_RX_NAME "Zilog/Hauppauge IR RX"
-#define ZILOG_HAUPPAUGE_IR_TX_NAME "Zilog/Hauppauge IR TX"
+#define zilog_info(s, args...) printk(KERN_INFO KBUILD_MODNAME ": " s, ## args)
 
 /* module parameters */
 static int debug;	/* debug output */
-static int disable_rx;	/* disable RX device */
-static int disable_tx;	/* disable TX device */
+static int tx_only;	/* only handle the IR Tx function */
 static int minor = -1;	/* minor number */
 
 #define dprintk(fmt, args...)						\
@@ -150,8 +156,12 @@
 	int ret;
 	int failures = 0;
 	unsigned char sendbuf[1] = { 0 };
+	struct IR_rx *rx = ir->rx;
 
-	if (lirc_buffer_full(&ir->buf)) {
+	if (rx == NULL)
+		return -ENXIO;
+
+	if (lirc_buffer_full(&rx->buf)) {
 		dprintk("buffer overflow\n");
 		return -EOVERFLOW;
 	}
@@ -161,17 +171,25 @@
 	 * data and we have space
 	 */
 	do {
+		if (kthread_should_stop())
+			return -ENODATA;
+
 		/*
 		 * Lock i2c bus for the duration.  RX/TX chips interfere so
 		 * this is worth it
 		 */
 		mutex_lock(&ir->ir_lock);
 
+		if (kthread_should_stop()) {
+			mutex_unlock(&ir->ir_lock);
+			return -ENODATA;
+		}
+
 		/*
 		 * Send random "poll command" (?)  Windows driver does this
 		 * and it is a good point to detect chip failure.
 		 */
-		ret = i2c_master_send(&ir->c_rx, sendbuf, 1);
+		ret = i2c_master_send(rx->c, sendbuf, 1);
 		if (ret != 1) {
 			zilog_error("i2c_master_send failed with %d\n",	ret);
 			if (failures >= 3) {
@@ -186,45 +204,53 @@
 				    "trying reset\n");
 
 			set_current_state(TASK_UNINTERRUPTIBLE);
+			if (kthread_should_stop()) {
+				mutex_unlock(&ir->ir_lock);
+				return -ENODATA;
+			}
 			schedule_timeout((100 * HZ + 999) / 1000);
-			ir->need_boot = 1;
+			ir->tx->need_boot = 1;
 
 			++failures;
 			mutex_unlock(&ir->ir_lock);
 			continue;
 		}
 
-		ret = i2c_master_recv(&ir->c_rx, keybuf, sizeof(keybuf));
+		if (kthread_should_stop()) {
+			mutex_unlock(&ir->ir_lock);
+			return -ENODATA;
+		}
+		ret = i2c_master_recv(rx->c, keybuf, sizeof(keybuf));
 		mutex_unlock(&ir->ir_lock);
 		if (ret != sizeof(keybuf)) {
 			zilog_error("i2c_master_recv failed with %d -- "
 				    "keeping last read buffer\n", ret);
 		} else {
-			ir->b[0] = keybuf[3];
-			ir->b[1] = keybuf[4];
-			ir->b[2] = keybuf[5];
-			dprintk("key (0x%02x/0x%02x)\n", ir->b[0], ir->b[1]);
+			rx->b[0] = keybuf[3];
+			rx->b[1] = keybuf[4];
+			rx->b[2] = keybuf[5];
+			dprintk("key (0x%02x/0x%02x)\n", rx->b[0], rx->b[1]);
 		}
 
 		/* key pressed ? */
-		if (ir->is_hdpvr) {
+		if (rx->hdpvr_data_fmt) {
 			if (got_data && (keybuf[0] == 0x80))
 				return 0;
 			else if (got_data && (keybuf[0] == 0x00))
 				return -ENODATA;
-		} else if ((ir->b[0] & 0x80) == 0)
+		} else if ((rx->b[0] & 0x80) == 0)
 			return got_data ? 0 : -ENODATA;
 
 		/* look what we have */
-		code = (((__u16)ir->b[0] & 0x7f) << 6) | (ir->b[1] >> 2);
+		code = (((__u16)rx->b[0] & 0x7f) << 6) | (rx->b[1] >> 2);
 
 		codes[0] = (code >> 8) & 0xff;
 		codes[1] = code & 0xff;
 
 		/* return it */
-		lirc_buffer_write(&ir->buf, codes);
+		lirc_buffer_write(&rx->buf, codes);
 		++got_data;
-	} while (!lirc_buffer_full(&ir->buf));
+	} while (!lirc_buffer_full(&rx->buf));
 
 	return 0;
 }
@@ -242,46 +268,35 @@
 static int lirc_thread(void *arg)
 {
 	struct IR *ir = arg;
-
-	if (ir->t_notify != NULL)
-		complete(ir->t_notify);
+	struct IR_rx *rx = ir->rx;
 
 	dprintk("poll thread started\n");
 
-	do {
-		if (ir->open) {
-			set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
 
-			/*
-			 * This is ~113*2 + 24 + jitter (2*repeat gap +
-			 * code length).  We use this interval as the chip
-			 * resets every time you poll it (bad!).  This is
-			 * therefore just sufficient to catch all of the
-			 * button presses.  It makes the remote much more
-			 * responsive.  You can see the difference by
-			 * running irw and holding down a button.  With
-			 * 100ms, the old polling interval, you'll notice
-			 * breaks in the repeat sequence corresponding to
-			 * lost keypresses.
-			 */
-			schedule_timeout((260 * HZ) / 1000);
-			if (ir->shutdown)
-				break;
-			if (!add_to_buf(ir))
-				wake_up_interruptible(&ir->buf.wait_poll);
-		} else {
-			/* if device not opened so we can sleep half a second */
-			set_current_state(TASK_INTERRUPTIBLE);
+		/* if device not opened, we can sleep half a second */
+		if (!ir->open) {
 			schedule_timeout(HZ/2);
+			continue;
 		}
-	} while (!ir->shutdown);
 
-	if (ir->t_notify2 != NULL)
-		wait_for_completion(ir->t_notify2);
-
-	ir->task = NULL;
-	if (ir->t_notify != NULL)
-		complete(ir->t_notify);
+		/*
+		 * This is ~113*2 + 24 + jitter (2*repeat gap + code length).
+		 * We use this interval as the chip resets every time you poll
+		 * it (bad!).  This is therefore just sufficient to catch all
+		 * of the button presses.  It makes the remote much more
+		 * responsive.  You can see the difference by running irw and
+		 * holding down a button.  With 100ms, the old polling
+		 * interval, you'll notice breaks in the repeat sequence
+		 * corresponding to lost keypresses.
+		 */
+		schedule_timeout((260 * HZ) / 1000);
+		if (kthread_should_stop())
+			break;
+		if (!add_to_buf(ir))
+			wake_up_interruptible(&rx->buf.wait_poll);
+	}
 
 	dprintk("poll thread ended\n");
 	return 0;
@@ -299,10 +314,10 @@
 	 * this is completely broken code. lirc_unregister_driver()
 	 * must be possible even when the device is open
 	 */
-	if (ir->c_rx.addr)
-		i2c_use_client(&ir->c_rx);
-	if (ir->c_tx.addr)
-		i2c_use_client(&ir->c_tx);
+	if (ir->rx != NULL)
+		i2c_use_client(ir->rx->c);
+	if (ir->tx != NULL)
+		i2c_use_client(ir->tx->c);
 
 	return 0;
 }
@@ -311,10 +326,10 @@
 {
 	struct IR *ir = data;
 
-	if (ir->c_rx.addr)
-		i2c_release_client(&ir->c_rx);
-	if (ir->c_tx.addr)
-		i2c_release_client(&ir->c_tx);
+	if (ir->rx)
+		i2c_release_client(ir->rx->c);
+	if (ir->tx)
+		i2c_release_client(ir->tx->c);
 	if (ir->l.owner != NULL)
 		module_put(ir->l.owner);
 }
@@ -453,7 +468,7 @@
 }
 
 /* send a block of data to the IR TX device */
-static int send_data_block(struct IR *ir, unsigned char *data_block)
+static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
 {
 	int i, j, ret;
 	unsigned char buf[5];
@@ -467,7 +482,7 @@
 			buf[1 + j] = data_block[i + j];
 		dprintk("%02x %02x %02x %02x %02x",
 			buf[0], buf[1], buf[2], buf[3], buf[4]);
-		ret = i2c_master_send(&ir->c_tx, buf, tosend + 1);
+		ret = i2c_master_send(tx->c, buf, tosend + 1);
 		if (ret != tosend + 1) {
 			zilog_error("i2c_master_send failed with %d\n", ret);
 			return ret < 0 ? ret : -EFAULT;
@@ -478,38 +493,50 @@
 }
 
 /* send boot data to the IR TX device */
-static int send_boot_data(struct IR *ir)
+static int send_boot_data(struct IR_tx *tx)
 {
-	int ret;
+	int ret, i;
 	unsigned char buf[4];
 
 	/* send the boot block */
-	ret = send_data_block(ir, tx_data->boot_data);
+	ret = send_data_block(tx, tx_data->boot_data);
 	if (ret != 0)
 		return ret;
 
-	/* kick it off? */
+	/* Hit the go button to activate the new boot data */
 	buf[0] = 0x00;
 	buf[1] = 0x20;
-	ret = i2c_master_send(&ir->c_tx, buf, 2);
+	ret = i2c_master_send(tx->c, buf, 2);
 	if (ret != 2) {
 		zilog_error("i2c_master_send failed with %d\n", ret);
 		return ret < 0 ? ret : -EFAULT;
 	}
-	ret = i2c_master_send(&ir->c_tx, buf, 1);
+
+	/*
+	 * Wait for the Zilog to settle after the boot-block upload "go" command.
+	 * Without this delay, the HD-PVR and HVR-1950 both return an -EIO
+	 * upon attempting to get firmware revision, and tx probe thus fails.
+	 */
+	for (i = 0; i < 10; i++) {
+		ret = i2c_master_send(tx->c, buf, 1);
+		if (ret == 1)
+			break;
+		udelay(100);
+	}
+
 	if (ret != 1) {
 		zilog_error("i2c_master_send failed with %d\n", ret);
 		return ret < 0 ? ret : -EFAULT;
 	}
 
 	/* Here comes the firmware version... (hopefully) */
-	ret = i2c_master_recv(&ir->c_tx, buf, 4);
+	ret = i2c_master_recv(tx->c, buf, 4);
 	if (ret != 4) {
 		zilog_error("i2c_master_recv failed with %d\n", ret);
 		return 0;
 	}
-	if (buf[0] != 0x80) {
-		zilog_error("unexpected IR TX response: %02x\n", buf[0]);
+	if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
+		zilog_error("unexpected IR TX init response: %02x\n", buf[0]);
 		return 0;
 	}
 	zilog_notify("Zilog/Hauppauge IR blaster firmware version "
@@ -543,7 +570,7 @@
 }
 
 /* load "firmware" for the IR TX device */
-static int fw_load(struct IR *ir)
+static int fw_load(struct IR_tx *tx)
 {
 	int ret;
 	unsigned int i;
@@ -558,7 +585,7 @@
 	}
 
 	/* Request codeset data file */
-	ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", &ir->c_tx.dev);
+	ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", &tx->c->dev);
 	if (ret != 0) {
 		zilog_error("firmware haup-ir-blaster.bin not available "
 			    "(%d)\n", ret);
@@ -685,20 +712,20 @@
 }
 
 /* initialise the IR TX device */
-static int tx_init(struct IR *ir)
+static int tx_init(struct IR_tx *tx)
 {
 	int ret;
 
 	/* Load 'firmware' */
-	ret = fw_load(ir);
+	ret = fw_load(tx);
 	if (ret != 0)
 		return ret;
 
 	/* Send boot block */
-	ret = send_boot_data(ir);
+	ret = send_boot_data(tx);
 	if (ret != 0)
 		return ret;
-	ir->need_boot = 0;
+	tx->need_boot = 0;
 
 	/* Looks good */
 	return 0;
@@ -714,20 +741,20 @@
 static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
 {
 	struct IR *ir = filep->private_data;
-	unsigned char buf[ir->buf.chunk_size];
+	struct IR_rx *rx = ir->rx;
 	int ret = 0, written = 0;
 	DECLARE_WAITQUEUE(wait, current);
 
 	dprintk("read called\n");
-	if (ir->c_rx.addr == 0)
+	if (rx == NULL)
 		return -ENODEV;
 
-	if (mutex_lock_interruptible(&ir->buf_lock))
+	if (mutex_lock_interruptible(&rx->buf_lock))
 		return -ERESTARTSYS;
 
-	if (n % ir->buf.chunk_size) {
+	if (n % rx->buf.chunk_size) {
 		dprintk("read result = -EINVAL\n");
-		mutex_unlock(&ir->buf_lock);
+		mutex_unlock(&rx->buf_lock);
 		return -EINVAL;
 	}
 
@@ -736,7 +763,7 @@
 	 * to avoid losing scan code (in case when queue is awaken somewhere
 	 * between while condition checking and scheduling)
 	 */
-	add_wait_queue(&ir->buf.wait_poll, &wait);
+	add_wait_queue(&rx->buf.wait_poll, &wait);
 	set_current_state(TASK_INTERRUPTIBLE);
 
 	/*
@@ -744,7 +771,7 @@
 	 * mode and 'copy_to_user' is happy, wait for data.
 	 */
 	while (written < n && ret == 0) {
-		if (lirc_buffer_empty(&ir->buf)) {
+		if (lirc_buffer_empty(&rx->buf)) {
 			/*
 			 * According to the read(2) man page, 'written' can be
 			 * returned as less than 'n', instead of blocking
@@ -764,16 +791,17 @@
 			schedule();
 			set_current_state(TASK_INTERRUPTIBLE);
 		} else {
-			lirc_buffer_read(&ir->buf, buf);
+			unsigned char buf[rx->buf.chunk_size];
+			lirc_buffer_read(&rx->buf, buf);
 			ret = copy_to_user((void *)outbuf+written, buf,
-					   ir->buf.chunk_size);
-			written += ir->buf.chunk_size;
+					   rx->buf.chunk_size);
+			written += rx->buf.chunk_size;
 		}
 	}
 
-	remove_wait_queue(&ir->buf.wait_poll, &wait);
+	remove_wait_queue(&rx->buf.wait_poll, &wait);
 	set_current_state(TASK_RUNNING);
-	mutex_unlock(&ir->buf_lock);
+	mutex_unlock(&rx->buf_lock);
 
 	dprintk("read result = %s (%d)\n",
 		ret ? "-EFAULT" : "OK", ret);
@@ -782,7 +810,7 @@
 }
 
 /* send a keypress to the IR TX device */
-static int send_code(struct IR *ir, unsigned int code, unsigned int key)
+static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
 {
 	unsigned char data_block[TX_BLOCK_SIZE];
 	unsigned char buf[2];
@@ -799,26 +827,34 @@
 		return ret;
 
 	/* Send the data block */
-	ret = send_data_block(ir, data_block);
+	ret = send_data_block(tx, data_block);
 	if (ret != 0)
 		return ret;
 
 	/* Send data block length? */
 	buf[0] = 0x00;
 	buf[1] = 0x40;
-	ret = i2c_master_send(&ir->c_tx, buf, 2);
+	ret = i2c_master_send(tx->c, buf, 2);
 	if (ret != 2) {
 		zilog_error("i2c_master_send failed with %d\n", ret);
 		return ret < 0 ? ret : -EFAULT;
 	}
-	ret = i2c_master_send(&ir->c_tx, buf, 1);
+
+	/* Give the z8 a moment to process data block */
+	for (i = 0; i < 10; i++) {
+		ret = i2c_master_send(tx->c, buf, 1);
+		if (ret == 1)
+			break;
+		udelay(100);
+	}
+
 	if (ret != 1) {
 		zilog_error("i2c_master_send failed with %d\n", ret);
 		return ret < 0 ? ret : -EFAULT;
 	}
 
 	/* Send finished download? */
-	ret = i2c_master_recv(&ir->c_tx, buf, 1);
+	ret = i2c_master_recv(tx->c, buf, 1);
 	if (ret != 1) {
 		zilog_error("i2c_master_recv failed with %d\n", ret);
 		return ret < 0 ? ret : -EFAULT;
@@ -832,7 +868,7 @@
 	/* Send prepare command? */
 	buf[0] = 0x00;
 	buf[1] = 0x80;
-	ret = i2c_master_send(&ir->c_tx, buf, 2);
+	ret = i2c_master_send(tx->c, buf, 2);
 	if (ret != 2) {
 		zilog_error("i2c_master_send failed with %d\n", ret);
 		return ret < 0 ? ret : -EFAULT;
@@ -843,7 +879,7 @@
 	 * last i2c_master_recv always fails with a -5, so for now, we're
 	 * going to skip this whole mess and say we're done on the HD PVR
 	 */
-	if (ir->is_hdpvr) {
+	if (!tx->post_tx_ready_poll) {
 		dprintk("sent code %u, key %u\n", code, key);
 		return 0;
 	}
@@ -857,7 +893,7 @@
 	for (i = 0; i < 20; ++i) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 		schedule_timeout((50 * HZ + 999) / 1000);
-		ret = i2c_master_send(&ir->c_tx, buf, 1);
+		ret = i2c_master_send(tx->c, buf, 1);
 		if (ret == 1)
 			break;
 		dprintk("NAK expected: i2c_master_send "
@@ -870,7 +906,7 @@
 	}
 
 	/* Seems to be an 'ok' response */
-	i = i2c_master_recv(&ir->c_tx, buf, 1);
+	i = i2c_master_recv(tx->c, buf, 1);
 	if (i != 1) {
 		zilog_error("i2c_master_recv failed with %d\n", ret);
 		return -EFAULT;
@@ -895,10 +931,11 @@
 			  loff_t *ppos)
 {
 	struct IR *ir = filep->private_data;
+	struct IR_tx *tx = ir->tx;
 	size_t i;
 	int failures = 0;
 
-	if (ir->c_tx.addr == 0)
+	if (tx == NULL)
 		return -ENODEV;
 
 	/* Validate user parameters */
@@ -919,15 +956,15 @@
 		}
 
 		/* Send boot data first if required */
-		if (ir->need_boot == 1) {
-			ret = send_boot_data(ir);
+		if (tx->need_boot == 1) {
+			ret = send_boot_data(tx);
 			if (ret == 0)
-				ir->need_boot = 0;
+				tx->need_boot = 0;
 		}
 
 		/* Send the code */
 		if (ret == 0) {
-			ret = send_code(ir, (unsigned)command >> 16,
+			ret = send_code(tx, (unsigned)command >> 16,
 					    (unsigned)command & 0xFFFF);
 			if (ret == -EPROTO) {
 				mutex_unlock(&ir->ir_lock);
@@ -952,7 +989,7 @@
 			}
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			schedule_timeout((100 * HZ + 999) / 1000);
-			ir->need_boot = 1;
+			tx->need_boot = 1;
 			++failures;
 		} else
 			i += sizeof(int);
@@ -969,22 +1006,23 @@
 static unsigned int poll(struct file *filep, poll_table *wait)
 {
 	struct IR *ir = filep->private_data;
+	struct IR_rx *rx = ir->rx;
 	unsigned int ret;
 
 	dprintk("poll called\n");
-	if (ir->c_rx.addr == 0)
+	if (rx == NULL)
 		return -ENODEV;
 
-	mutex_lock(&ir->buf_lock);
+	mutex_lock(&rx->buf_lock);
 
-	poll_wait(filep, &ir->buf.wait_poll, wait);
+	poll_wait(filep, &rx->buf.wait_poll, wait);
 
 	dprintk("poll result = %s\n",
-		lirc_buffer_empty(&ir->buf) ? "0" : "POLLIN|POLLRDNORM");
+		lirc_buffer_empty(&rx->buf) ? "0" : "POLLIN|POLLRDNORM");
 
-	ret = lirc_buffer_empty(&ir->buf) ? 0 : (POLLIN|POLLRDNORM);
+	ret = lirc_buffer_empty(&rx->buf) ? 0 : (POLLIN|POLLRDNORM);
 
-	mutex_unlock(&ir->buf_lock);
+	mutex_unlock(&rx->buf_lock);
 	return ret;
 }
 
@@ -994,10 +1032,9 @@
 	int result;
 	unsigned long mode, features = 0;
 
-	if (ir->c_rx.addr != 0)
+	features |= LIRC_CAN_SEND_PULSE;
+	if (ir->rx != NULL)
 		features |= LIRC_CAN_REC_LIRCCODE;
-	if (ir->c_tx.addr != 0)
-		features |= LIRC_CAN_SEND_PULSE;
 
 	switch (cmd) {
 	case LIRC_GET_LENGTH:
@@ -1024,15 +1061,9 @@
 			result = -EINVAL;
 		break;
 	case LIRC_GET_SEND_MODE:
-		if (!(features&LIRC_CAN_SEND_MASK))
-			return -ENOSYS;
-
 		result = put_user(LIRC_MODE_PULSE, (unsigned long *) arg);
 		break;
 	case LIRC_SET_SEND_MODE:
-		if (!(features&LIRC_CAN_SEND_MASK))
-			return -ENOSYS;
-
 		result = get_user(mode, (unsigned long *) arg);
 		if (!result && mode != LIRC_MODE_PULSE)
 			return -EINVAL;
@@ -1043,6 +1074,15 @@
 	return result;
 }
 
+/* ir_devices_lock must be held */
+static struct IR *find_ir_device_by_minor(unsigned int minor)
+{
+	if (minor >= MAX_IRCTL_DEVICES)
+		return NULL;
+
+	return ir_devices[minor];
+}
+
 /*
  * Open the IR device.  Get hold of our IR structure and
  * stash it in private_data for the file
@@ -1051,15 +1091,15 @@
 {
 	struct IR *ir;
 	int ret;
+	unsigned int minor = MINOR(node->i_rdev);
 
 	/* find our IR struct */
-	unsigned minor = MINOR(node->i_rdev);
-	if (minor >= MAX_IRCTL_DEVICES) {
-		dprintk("minor %d: open result = -ENODEV\n",
-			minor);
+	mutex_lock(&ir_devices_lock);
+	ir = find_ir_device_by_minor(minor);
+	mutex_unlock(&ir_devices_lock);
+
+	if (ir == NULL)
 		return -ENODEV;
-	}
-	ir = ir_devices[minor];
 
 	/* increment in use count */
 	mutex_lock(&ir->ir_lock);
@@ -1106,7 +1146,6 @@
 
 static int ir_remove(struct i2c_client *client);
 static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id);
-static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg);
 
 #define ID_FLAG_TX	0x01
 #define ID_FLAG_HDPVR	0x02
@@ -1126,7 +1165,6 @@
 	},
 	.probe		= ir_probe,
 	.remove		= ir_remove,
-	.command	= ir_command,
 	.id_table	= ir_transceiver_id,
 };
 
@@ -1144,214 +1182,253 @@
 	.release	= close
 };
 
+static void destroy_rx_kthread(struct IR_rx *rx)
+{
+	/* end up polling thread */
+	if (rx != NULL && !IS_ERR_OR_NULL(rx->task)) {
+		kthread_stop(rx->task);
+		rx->task = NULL;
+	}
+}
+
+/* ir_devices_lock must be held */
+static int add_ir_device(struct IR *ir)
+{
+	int i;
+
+	for (i = 0; i < MAX_IRCTL_DEVICES; i++)
+		if (ir_devices[i] == NULL) {
+			ir_devices[i] = ir;
+			break;
+		}
+
+	return i == MAX_IRCTL_DEVICES ? -ENOMEM : i;
+}
+
+/* ir_devices_lock must be held */
+static void del_ir_device(struct IR *ir)
+{
+	int i;
+
+	for (i = 0; i < MAX_IRCTL_DEVICES; i++)
+		if (ir_devices[i] == ir) {
+			ir_devices[i] = NULL;
+			break;
+		}
+}
+
 static int ir_remove(struct i2c_client *client)
 {
 	struct IR *ir = i2c_get_clientdata(client);
 
-	mutex_lock(&ir->ir_lock);
+	mutex_lock(&ir_devices_lock);
 
-	if (ir->have_rx || ir->have_tx) {
-		DECLARE_COMPLETION(tn);
-		DECLARE_COMPLETION(tn2);
-
-		/* end up polling thread */
-		if (ir->task && !IS_ERR(ir->task)) {
-			ir->t_notify = &tn;
-			ir->t_notify2 = &tn2;
-			ir->shutdown = 1;
-			wake_up_process(ir->task);
-			complete(&tn2);
-			wait_for_completion(&tn);
-			ir->t_notify = NULL;
-			ir->t_notify2 = NULL;
-		}
-
-	} else {
-		mutex_unlock(&ir->ir_lock);
-		zilog_error("%s: detached from something we didn't "
-			    "attach to\n", __func__);
-		return -ENODEV;
+	if (ir == NULL) {
+		/* We destroyed everything when the first client came through */
+		mutex_unlock(&ir_devices_lock);
+		return 0;
 	}
 
-	/* unregister lirc driver */
-	if (ir->l.minor >= 0 && ir->l.minor < MAX_IRCTL_DEVICES) {
-		lirc_unregister_driver(ir->l.minor);
-		ir_devices[ir->l.minor] = NULL;
+	/* Good-bye LIRC */
+	lirc_unregister_driver(ir->l.minor);
+
+	/* Good-bye Rx */
+	destroy_rx_kthread(ir->rx);
+	if (ir->rx != NULL) {
+		if (ir->rx->buf.fifo_initialized)
+			lirc_buffer_free(&ir->rx->buf);
+		i2c_set_clientdata(ir->rx->c, NULL);
+		kfree(ir->rx);
 	}
 
-	/* free memory */
-	lirc_buffer_free(&ir->buf);
-	mutex_unlock(&ir->ir_lock);
+	/* Good-bye Tx */
+	i2c_set_clientdata(ir->tx->c, NULL);
+	kfree(ir->tx);
+
+	/* Good-bye IR */
+	del_ir_device(ir);
 	kfree(ir);
 
+	mutex_unlock(&ir_devices_lock);
 	return 0;
 }
 
+
+/* ir_devices_lock must be held */
+static struct IR *find_ir_device_by_adapter(struct i2c_adapter *adapter)
+{
+	int i;
+	struct IR *ir = NULL;
+
+	for (i = 0; i < MAX_IRCTL_DEVICES; i++)
+		if (ir_devices[i] != NULL &&
+		    ir_devices[i]->adapter == adapter) {
+			ir = ir_devices[i];
+			break;
+		}
+
+	return ir;
+}
+
 static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
-	struct IR *ir = NULL;
+	struct IR *ir;
 	struct i2c_adapter *adap = client->adapter;
-	char buf;
 	int ret;
-	int have_rx = 0, have_tx = 0;
+	bool tx_probe = false;
 
-	dprintk("%s: adapter name (%s) nr %d, i2c_device_id name (%s), "
-		"client addr=0x%02x\n",
-		__func__, adap->name, adap->nr, id->name, client->addr);
+	dprintk("%s: %s on i2c-%d (%s), client addr=0x%02x\n",
+		__func__, id->name, adap->nr, adap->name, client->addr);
 
 	/*
-	 * FIXME - This probe function probes both the Tx and Rx
-	 * addresses of the IR microcontroller.
-	 *
-	 * However, the I2C subsystem is passing along one I2C client at a
-	 * time, based on matches to the ir_transceiver_id[] table above.
-	 * The expectation is that each i2c_client address will be probed
-	 * individually by drivers so the I2C subsystem can mark all client
-	 * addresses as claimed or not.
-	 *
-	 * This probe routine causes only one of the client addresses, TX or RX,
-	 * to be claimed.  This will cause a problem if the I2C subsystem is
-	 * subsequently triggered to probe unclaimed clients again.
+	 * The IR receiver    is at i2c address 0x71.
+	 * The IR transmitter is at i2c address 0x70.
 	 */
-	/*
-	 * The external IR receiver is at i2c address 0x71.
-	 * The IR transmitter is at 0x70.
-	 */
-	client->addr = 0x70;
 
-	if (!disable_tx) {
-		if (i2c_master_recv(client, &buf, 1) == 1)
-			have_tx = 1;
-		dprintk("probe 0x70 @ %s: %s\n",
-			adap->name, have_tx ? "success" : "failed");
+	if (id->driver_data & ID_FLAG_TX)
+		tx_probe = true;
+	else if (tx_only) /* module option */
+		return -ENXIO;
+
+	zilog_info("probing IR %s on %s (i2c-%d)\n",
+		   tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
+
+	mutex_lock(&ir_devices_lock);
+
+	/* Use a single struct IR instance for both the Rx and Tx functions */
+	ir = find_ir_device_by_adapter(adap);
+	if (ir == NULL) {
+		ir = kzalloc(sizeof(struct IR), GFP_KERNEL);
+		if (ir == NULL) {
+			ret = -ENOMEM;
+			goto out_no_ir;
+		}
+		/* store for use in ir_probe() again, and open() later on */
+		ret = add_ir_device(ir);
+		if (ret)
+			goto out_free_ir;
+
+		ir->adapter = adap;
+		mutex_init(&ir->ir_lock);
+
+		/* set lirc_dev stuff */
+		memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
+		ir->l.minor       = minor; /* module option */
+		ir->l.code_length = 13;
+		ir->l.rbuf	  = NULL;
+		ir->l.fops	  = &lirc_fops;
+		ir->l.data	  = ir;
+		ir->l.dev         = &adap->dev;
+		ir->l.sample_rate = 0;
 	}
 
-	if (!disable_rx) {
-		client->addr = 0x71;
-		if (i2c_master_recv(client, &buf, 1) == 1)
-			have_rx = 1;
-		dprintk("probe 0x71 @ %s: %s\n",
-			adap->name, have_rx ? "success" : "failed");
+	if (tx_probe) {
+		/* Set up a struct IR_tx instance */
+		ir->tx = kzalloc(sizeof(struct IR_tx), GFP_KERNEL);
+		if (ir->tx == NULL) {
+			ret = -ENOMEM;
+			goto out_free_xx;
+		}
+
+		ir->tx->c = client;
+		ir->tx->need_boot = 1;
+		ir->tx->post_tx_ready_poll =
+			       (id->driver_data & ID_FLAG_HDPVR) ? false : true;
+	} else {
+		/* Set up a struct IR_rx instance */
+		ir->rx = kzalloc(sizeof(struct IR_rx), GFP_KERNEL);
+		if (ir->rx == NULL) {
+			ret = -ENOMEM;
+			goto out_free_xx;
+		}
+
+		ret = lirc_buffer_init(&ir->rx->buf, 2, BUFLEN / 2);
+		if (ret)
+			goto out_free_xx;
+
+		mutex_init(&ir->rx->buf_lock);
+		ir->rx->c = client;
+		ir->rx->hdpvr_data_fmt =
+			       (id->driver_data & ID_FLAG_HDPVR) ? true : false;
+
+		/* set lirc_dev stuff */
+		ir->l.rbuf = &ir->rx->buf;
 	}
 
-	if (!(have_rx || have_tx)) {
-		zilog_error("%s: no devices found\n", adap->name);
-		goto out_nodev;
-	}
-
-	printk(KERN_INFO "lirc_zilog: chip found with %s\n",
-		have_rx && have_tx ? "RX and TX" :
-			have_rx ? "RX only" : "TX only");
-
-	ir = kzalloc(sizeof(struct IR), GFP_KERNEL);
-
-	if (!ir)
-		goto out_nomem;
-
-	ret = lirc_buffer_init(&ir->buf, 2, BUFLEN / 2);
-	if (ret)
-		goto out_nomem;
-
-	mutex_init(&ir->ir_lock);
-	mutex_init(&ir->buf_lock);
-	ir->need_boot = 1;
-	ir->is_hdpvr = (id->driver_data & ID_FLAG_HDPVR) ? true : false;
-
-	memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
-	ir->l.minor = -1;
-
-	/* I2C attach to device */
 	i2c_set_clientdata(client, ir);
 
+	/* Proceed only if we have the required Tx and Rx clients ready to go */
+	if (ir->tx == NULL ||
+	    (ir->rx == NULL && !tx_only)) {
+		zilog_info("probe of IR %s on %s (i2c-%d) done. Waiting on "
+			   "IR %s.\n", tx_probe ? "Tx" : "Rx", adap->name,
+			   adap->nr, tx_probe ? "Rx" : "Tx");
+		goto out_ok;
+	}
+
 	/* initialise RX device */
-	if (have_rx) {
-		DECLARE_COMPLETION(tn);
-		memcpy(&ir->c_rx, client, sizeof(struct i2c_client));
-
-		ir->c_rx.addr = 0x71;
-		strlcpy(ir->c_rx.name, ZILOG_HAUPPAUGE_IR_RX_NAME,
-			I2C_NAME_SIZE);
-
+	if (ir->rx != NULL) {
 		/* try to fire up polling thread */
-		ir->t_notify = &tn;
-		ir->task = kthread_run(lirc_thread, ir, "lirc_zilog");
-		if (IS_ERR(ir->task)) {
-			ret = PTR_ERR(ir->task);
-			zilog_error("lirc_register_driver: cannot run "
-				    "poll thread %d\n", ret);
-			goto err;
+		ir->rx->task = kthread_run(lirc_thread, ir,
+					   "zilog-rx-i2c-%d", adap->nr);
+		if (IS_ERR(ir->rx->task)) {
+			ret = PTR_ERR(ir->rx->task);
+			zilog_error("%s: could not start IR Rx polling thread"
+				    "\n", __func__);
+			goto out_free_xx;
 		}
-		wait_for_completion(&tn);
-		ir->t_notify = NULL;
-		ir->have_rx = 1;
 	}
 
-	/* initialise TX device */
-	if (have_tx) {
-		memcpy(&ir->c_tx, client, sizeof(struct i2c_client));
-		ir->c_tx.addr = 0x70;
-		strlcpy(ir->c_tx.name, ZILOG_HAUPPAUGE_IR_TX_NAME,
-			I2C_NAME_SIZE);
-		ir->have_tx = 1;
-	}
-
-	/* set lirc_dev stuff */
-	ir->l.code_length = 13;
-	ir->l.rbuf	  = &ir->buf;
-	ir->l.fops	  = &lirc_fops;
-	ir->l.data	  = ir;
-	ir->l.minor       = minor;
-	ir->l.dev         = &adap->dev;
-	ir->l.sample_rate = 0;
-
 	/* register with lirc */
 	ir->l.minor = lirc_register_driver(&ir->l);
 	if (ir->l.minor < 0 || ir->l.minor >= MAX_IRCTL_DEVICES) {
-		zilog_error("ir_attach: \"minor\" must be between 0 and %d "
-			    "(%d)!\n", MAX_IRCTL_DEVICES-1, ir->l.minor);
+		zilog_error("%s: \"minor\" must be between 0 and %d (%d)!\n",
+			    __func__, MAX_IRCTL_DEVICES-1, ir->l.minor);
 		ret = -EBADRQC;
-		goto err;
+		goto out_free_thread;
 	}
 
-	/* store this for getting back in open() later on */
-	ir_devices[ir->l.minor] = ir;
-
 	/*
 	 * if we have the tx device, load the 'firmware'.  We do this
 	 * after registering with lirc as otherwise hotplug seems to take
 	 * 10s to create the lirc device.
 	 */
-	if (have_tx) {
-		/* Special TX init */
-		ret = tx_init(ir);
-		if (ret != 0)
-			goto err;
+	ret = tx_init(ir->tx);
+	if (ret != 0)
+		goto out_unregister;
+
+	zilog_info("probe of IR %s on %s (i2c-%d) done. IR unit ready.\n",
+		   tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
+out_ok:
+	mutex_unlock(&ir_devices_lock);
+	return 0;
+
+out_unregister:
+	lirc_unregister_driver(ir->l.minor);
+out_free_thread:
+	destroy_rx_kthread(ir->rx);
+out_free_xx:
+	if (ir->rx != NULL) {
+		if (ir->rx->buf.fifo_initialized)
+			lirc_buffer_free(&ir->rx->buf);
+		if (ir->rx->c != NULL)
+			i2c_set_clientdata(ir->rx->c, NULL);
+		kfree(ir->rx);
 	}
-
-	return 0;
-
-err:
-	/* undo everything, hopefully... */
-	if (ir->c_rx.addr)
-		ir_remove(&ir->c_rx);
-	if (ir->c_tx.addr)
-		ir_remove(&ir->c_tx);
-	return ret;
-
-out_nodev:
-	zilog_error("no device found\n");
-	return -ENODEV;
-
-out_nomem:
-	zilog_error("memory allocation failure\n");
+	if (ir->tx != NULL) {
+		if (ir->tx->c != NULL)
+			i2c_set_clientdata(ir->tx->c, NULL);
+		kfree(ir->tx);
+	}
+out_free_ir:
+	del_ir_device(ir);
 	kfree(ir);
-	return -ENOMEM;
-}
-
-static int ir_command(struct i2c_client *client, unsigned int cmd, void *arg)
-{
-	/* nothing */
-	return 0;
+out_no_ir:
+	zilog_error("%s: probing IR %s on %s (i2c-%d) failed with %d\n",
+		    __func__, tx_probe ? "Tx" : "Rx", adap->name, adap->nr,
+		    ret);
+	mutex_unlock(&ir_devices_lock);
+	return ret;
 }
 
 static int __init zilog_init(void)
@@ -1361,6 +1438,7 @@
 	zilog_notify("Zilog/Hauppauge IR driver initializing\n");
 
 	mutex_init(&tx_data_lock);
+	mutex_init(&ir_devices_lock);
 
 	request_module("firmware_class");
 
@@ -1386,7 +1464,8 @@
 
 MODULE_DESCRIPTION("Zilog/Hauppauge infrared transmitter driver (i2c stack)");
 MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, "
-	      "Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver");
+	      "Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver, "
+	      "Andy Walls");
 MODULE_LICENSE("GPL");
 /* for compat with old name, which isn't all that accurate anymore */
 MODULE_ALIAS("lirc_pvr150");
@@ -1397,8 +1476,5 @@
 module_param(debug, bool, 0644);
 MODULE_PARM_DESC(debug, "Enable debugging messages");
 
-module_param(disable_rx, bool, 0644);
-MODULE_PARM_DESC(disable_rx, "Disable the IR receiver device");
-
-module_param(disable_tx, bool, 0644);
-MODULE_PARM_DESC(disable_tx, "Disable the IR transmitter device");
+module_param(tx_only, bool, 0644);
+MODULE_PARM_DESC(tx_only, "Only handle the IR transmit function");
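The two retry loops added above (in send_boot_data() and send_code()) share the same bounded poll-with-udelay shape. An illustrative sketch of that idiom factored into a helper -- not part of the patch, and the helper name is hypothetical:

#include <linux/i2c.h>
#include <linux/delay.h>

/* Retry a one-byte "go" write up to 10 times, 100us apart, since the Z8
 * can NAK while it is still digesting the previous data block. */
static int zilog_send_byte_retry(struct i2c_client *c, unsigned char b)
{
	int i, ret = 0;

	for (i = 0; i < 10; i++) {
		ret = i2c_master_send(c, &b, 1);
		if (ret == 1)
			return 0;
		udelay(100);
	}
	return ret < 0 ? ret : -EFAULT;
}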
diff --git a/drivers/staging/msm/msm_fb.c b/drivers/staging/msm/msm_fb.c
index 23fa049..a2f29d4 100644
--- a/drivers/staging/msm/msm_fb.c
+++ b/drivers/staging/msm/msm_fb.c
@@ -347,7 +347,7 @@
 	if ((!mfd) || (mfd->key != MFD_KEY))
 		return 0;
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(mfd->fbi, 1);
 
 	ret = msm_fb_suspend_sub(mfd);
@@ -358,7 +358,7 @@
 		pdev->dev.power.power_state = state;
 	}
 
-	release_console_sem();
+	console_unlock();
 	return ret;
 }
 #else
@@ -431,11 +431,11 @@
 	if ((!mfd) || (mfd->key != MFD_KEY))
 		return 0;
 
-	acquire_console_sem();
+	console_lock();
 	ret = msm_fb_resume_sub(mfd);
 	pdev->dev.power.power_state = PMSG_ON;
 	fb_set_suspend(mfd->fbi, 1);
-	release_console_sem();
+	console_unlock();
 
 	return ret;
 }
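This hunk -- like the olpc_dcon and sm7xx hunks further down -- is the mechanical rename of acquire_console_sem()/release_console_sem() to console_lock()/console_unlock(); the locking semantics are unchanged. A minimal sketch of the resulting suspend-path pattern (the wrapper name is hypothetical):

#include <linux/console.h>
#include <linux/fb.h>

static void example_fb_set_suspend(struct fb_info *info, int suspend)
{
	/* fb_set_suspend() must be called with the console lock held */
	console_lock();
	fb_set_suspend(info, suspend);
	console_unlock();
}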
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 9f26dc9..56a283d 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -373,17 +373,17 @@
 		 *
 		 * For now, we just hope..
 		 */
-		acquire_console_sem();
+		console_lock();
 		ignore_fb_events = 1;
 		if (fb_blank(fbinfo, FB_BLANK_UNBLANK)) {
 			ignore_fb_events = 0;
-			release_console_sem();
+			console_unlock();
 			printk(KERN_ERR "olpc-dcon:  Failed to enter CPU mode\n");
 			dcon_pending = DCON_SOURCE_DCON;
 			return;
 		}
 		ignore_fb_events = 0;
-		release_console_sem();
+		console_unlock();
 
 		/* And turn off the DCON */
 		pdata->set_dconload(1);
@@ -435,12 +435,12 @@
 			}
 		}
 
-		acquire_console_sem();
+		console_lock();
 		ignore_fb_events = 1;
 		if (fb_blank(fbinfo, FB_BLANK_POWERDOWN))
 			printk(KERN_ERR "olpc-dcon:  couldn't blank fb!\n");
 		ignore_fb_events = 0;
-		release_console_sem();
+		console_unlock();
 
 		printk(KERN_INFO "olpc-dcon: The DCON has control\n");
 		break;
diff --git a/drivers/staging/rt2860/rt_main_dev.c b/drivers/staging/rt2860/rt_main_dev.c
index 701561d..236dd36 100644
--- a/drivers/staging/rt2860/rt_main_dev.c
+++ b/drivers/staging/rt2860/rt_main_dev.c
@@ -484,8 +484,6 @@
 	net_dev->ml_priv = (void *)pAd;
 	pAd->net_dev = net_dev;
 
-	netif_stop_queue(net_dev);
-
 	return net_dev;
 
 }
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index ee68d51..322bf49 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -106,6 +106,7 @@
 	{USB_DEVICE(0x0411, 0x016f)},	/* MelCo.,Inc. WLI-UC-G301N */
 	{USB_DEVICE(0x1737, 0x0070)},	/* Linksys WUSB100 */
 	{USB_DEVICE(0x1737, 0x0071)},	/* Linksys WUSB600N */
+	{USB_DEVICE(0x1737, 0x0078)},	/* Linksys WUSB100v2 */
 	{USB_DEVICE(0x0411, 0x00e8)},	/* Buffalo WLI-UC-G300N */
 	{USB_DEVICE(0x050d, 0x815c)},	/* Belkin F5D8053 */
 	{USB_DEVICE(0x100D, 0x9031)},	/* Motorola 2770 */
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index 32088a6..84be383 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -128,12 +128,13 @@
 	u8 *ptmpchar = NULL, *ppayload, *ptr;
 	struct tx_desc *ptx_desc;
 	u32 txdscp_sz = sizeof(struct tx_desc);
+	u8 ret = _FAIL;
 
 	ulfilelength = rtl871x_open_fw(padapter, &phfwfile_hdl, &pmappedfw);
 	if (pmappedfw && (ulfilelength > 0)) {
 		update_fwhdr(&fwhdr, pmappedfw);
 		if (chk_fwhdr(&fwhdr, ulfilelength) == _FAIL)
-			goto exit_fail;
+			goto firmware_rel;
 		fill_fwpriv(padapter, &fwhdr.fwpriv);
 		/* firmware check ok */
 		maxlen = (fwhdr.img_IMEM_size > fwhdr.img_SRAM_size) ?
@@ -141,7 +142,7 @@
 		maxlen += txdscp_sz;
 		ptmpchar = _malloc(maxlen + FWBUFF_ALIGN_SZ);
 		if (ptmpchar == NULL)
-			return _FAIL;
+			goto firmware_rel;
 
 		ptx_desc = (struct tx_desc *)(ptmpchar + FWBUFF_ALIGN_SZ -
 			    ((addr_t)(ptmpchar) & (FWBUFF_ALIGN_SZ - 1)));
@@ -273,11 +274,13 @@
 			goto exit_fail;
 	} else
 		goto exit_fail;
-	return _SUCCESS;
+	ret = _SUCCESS;
 
 exit_fail:
 	kfree(ptmpchar);
-	return _FAIL;
+firmware_rel:
+	release_firmware((struct firmware *)phfwfile_hdl);
+	return ret;
 }
 
 uint rtl8712_hal_init(struct _adapter *padapter)
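The hal_init.c change above adds a second unwind label so the firmware handle obtained by rtl871x_open_fw() is released on every exit path, while the temporary buffer is freed only on paths that allocated it. A reduced sketch of that ordering -- hypothetical names, not the driver's code:

#include <linux/firmware.h>
#include <linux/slab.h>

static int example_fw_load(struct device *dev)
{
	const struct firmware *fw;
	u8 *tmp;
	int ret = -EINVAL;

	if (request_firmware(&fw, "example.bin", dev))
		return -ENOENT;

	tmp = kmalloc(fw->size, GFP_KERNEL);
	if (!tmp) {
		ret = -ENOMEM;
		goto firmware_rel;	/* fw is the only thing to undo */
	}

	/* ... parse fw->data into tmp; set ret = 0 on success ... */
	ret = 0;

	kfree(tmp);
firmware_rel:
	release_firmware(fw);		/* dropped on every path */
	return ret;
}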
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index a692ee8..21ce2af 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -47,54 +47,123 @@
 static void r871xu_dev_remove(struct usb_interface *pusb_intf);
 
 static struct usb_device_id rtl871x_usb_id_tbl[] = {
-	/*92SU
-	 * Realtek */
-	{USB_DEVICE(0x0bda, 0x8171)},
-	{USB_DEVICE(0x0bda, 0x8172)},
+
+/* RTL8188SU */
+	/* Realtek */
+	{USB_DEVICE(0x0BDA, 0x8171)},
 	{USB_DEVICE(0x0bda, 0x8173)},
-	{USB_DEVICE(0x0bda, 0x8174)},
 	{USB_DEVICE(0x0bda, 0x8712)},
 	{USB_DEVICE(0x0bda, 0x8713)},
 	{USB_DEVICE(0x0bda, 0xC512)},
-	/* Abocom  */
+	/* Abocom */
 	{USB_DEVICE(0x07B8, 0x8188)},
-	/* Corega */
-	{USB_DEVICE(0x07aa, 0x0047)},
-	/* Dlink */
-	{USB_DEVICE(0x07d1, 0x3303)},
-	{USB_DEVICE(0x07d1, 0x3302)},
-	{USB_DEVICE(0x07d1, 0x3300)},
-	/* Dlink for Skyworth */
-	{USB_DEVICE(0x14b2, 0x3300)},
-	{USB_DEVICE(0x14b2, 0x3301)},
-	{USB_DEVICE(0x14b2, 0x3302)},
-	/* EnGenius */
-	{USB_DEVICE(0x1740, 0x9603)},
-	{USB_DEVICE(0x1740, 0x9605)},
+	/* ASUS */
+	{USB_DEVICE(0x0B05, 0x1786)},
+	{USB_DEVICE(0x0B05, 0x1791)}, /* 11n mode disable */
 	/* Belkin */
-	{USB_DEVICE(0x050d, 0x815F)},
-	{USB_DEVICE(0x050d, 0x945A)},
-	{USB_DEVICE(0x050d, 0x845A)},
-	/* Guillemot */
-	{USB_DEVICE(0x06f8, 0xe031)},
+	{USB_DEVICE(0x050D, 0x945A)},
+	/* Corega */
+	{USB_DEVICE(0x07AA, 0x0047)},
+	/* D-Link */
+	{USB_DEVICE(0x2001, 0x3306)},
+	{USB_DEVICE(0x07D1, 0x3306)}, /* 11n mode disable */
 	/* Edimax */
 	{USB_DEVICE(0x7392, 0x7611)},
-	{USB_DEVICE(0x7392, 0x7612)},
-	{USB_DEVICE(0x7392, 0x7622)},
+	/* EnGenius */
+	{USB_DEVICE(0x1740, 0x9603)},
+	/* Hawking */
+	{USB_DEVICE(0x0E66, 0x0016)},
+	/* Hercules */
+	{USB_DEVICE(0x06F8, 0xE034)},
+	{USB_DEVICE(0x06F8, 0xE032)},
+	/* Logitec */
+	{USB_DEVICE(0x0789, 0x0167)},
+	/* PCI */
+	{USB_DEVICE(0x2019, 0xAB28)},
+	{USB_DEVICE(0x2019, 0xED16)},
 	/* Sitecom */
+	{USB_DEVICE(0x0DF6, 0x0057)},
 	{USB_DEVICE(0x0DF6, 0x0045)},
+	{USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
+	{USB_DEVICE(0x0DF6, 0x004B)},
+	{USB_DEVICE(0x0DF6, 0x0063)},
+	/* Sweex */
+	{USB_DEVICE(0x177F, 0x0154)},
+	/* Thinkware */
+	{USB_DEVICE(0x0BDA, 0x5077)},
+	/* Toshiba */
+	{USB_DEVICE(0x1690, 0x0752)},
+	/* - */
+	{USB_DEVICE(0x20F4, 0x646B)},
+	{USB_DEVICE(0x083A, 0xC512)},
+
+/* RTL8191SU */
+	/* Realtek */
+	{USB_DEVICE(0x0BDA, 0x8172)},
+	/* Amigo */
+	{USB_DEVICE(0x0EB0, 0x9061)},
+	/* ASUS/EKB */
+	{USB_DEVICE(0x0BDA, 0x8172)},
+	{USB_DEVICE(0x13D3, 0x3323)},
+	{USB_DEVICE(0x13D3, 0x3311)}, /* 11n mode disable */
+	{USB_DEVICE(0x13D3, 0x3342)},
+	/* ASUS/EKBLenovo */
+	{USB_DEVICE(0x13D3, 0x3333)},
+	{USB_DEVICE(0x13D3, 0x3334)},
+	{USB_DEVICE(0x13D3, 0x3335)}, /* 11n mode disable */
+	{USB_DEVICE(0x13D3, 0x3336)}, /* 11n mode disable */
+	/* ASUS/Media BOX */
+	{USB_DEVICE(0x13D3, 0x3309)},
+	/* Belkin */
+	{USB_DEVICE(0x050D, 0x815F)},
+	/* D-Link */
+	{USB_DEVICE(0x07D1, 0x3302)},
+	{USB_DEVICE(0x07D1, 0x3300)},
+	{USB_DEVICE(0x07D1, 0x3303)},
+	/* Edimax */
+	{USB_DEVICE(0x7392, 0x7612)},
+	/* EnGenius */
+	{USB_DEVICE(0x1740, 0x9605)},
+	/* Guillemot */
+	{USB_DEVICE(0x06F8, 0xE031)},
 	/* Hawking */
 	{USB_DEVICE(0x0E66, 0x0015)},
-	{USB_DEVICE(0x0E66, 0x0016)},
-	{USB_DEVICE(0x0b05, 0x1786)},
-	{USB_DEVICE(0x0b05, 0x1791)},    /* 11n mode disable */
-
+	/* Mediao */
 	{USB_DEVICE(0x13D3, 0x3306)},
-	{USB_DEVICE(0x13D3, 0x3309)},
+	/* PCI */
+	{USB_DEVICE(0x2019, 0xED18)},
+	{USB_DEVICE(0x2019, 0x4901)},
+	/* Sitecom */
+	{USB_DEVICE(0x0DF6, 0x0058)},
+	{USB_DEVICE(0x0DF6, 0x0049)},
+	{USB_DEVICE(0x0DF6, 0x004C)},
+	{USB_DEVICE(0x0DF6, 0x0064)},
+	/* Skyworth */
+	{USB_DEVICE(0x14b2, 0x3300)},
+	{USB_DEVICE(0x14b2, 0x3301)},
+	{USB_DEVICE(0x14B2, 0x3302)},
+	/* - */
+	{USB_DEVICE(0x04F2, 0xAFF2)},
+	{USB_DEVICE(0x04F2, 0xAFF5)},
+	{USB_DEVICE(0x04F2, 0xAFF6)},
+	{USB_DEVICE(0x13D3, 0x3339)},
+	{USB_DEVICE(0x13D3, 0x3340)}, /* 11n mode disable */
+	{USB_DEVICE(0x13D3, 0x3341)}, /* 11n mode disable */
 	{USB_DEVICE(0x13D3, 0x3310)},
-	{USB_DEVICE(0x13D3, 0x3311)},    /* 11n mode disable */
 	{USB_DEVICE(0x13D3, 0x3325)},
-	{USB_DEVICE(0x083A, 0xC512)},
+
+/* RTL8192SU */
+	/* Realtek */
+	{USB_DEVICE(0x0BDA, 0x8174)},
+	{USB_DEVICE(0x0BDA, 0x8174)},
+	/* Belkin */
+	{USB_DEVICE(0x050D, 0x845A)},
+	/* Corega */
+	{USB_DEVICE(0x07AA, 0x0051)},
+	/* Edimax */
+	{USB_DEVICE(0x7392, 0x7622)},
+	/* NEC */
+	{USB_DEVICE(0x0409, 0x02B6)},
 	{}
 };
 
@@ -103,8 +172,20 @@
 static struct specific_device_id specific_device_id_tbl[] = {
 	{.idVendor = 0x0b05, .idProduct = 0x1791,
 		 .flags = SPEC_DEV_ID_DISABLE_HT},
+	{.idVendor = 0x0df6, .idProduct = 0x0059,
+		 .flags = SPEC_DEV_ID_DISABLE_HT},
+	{.idVendor = 0x13d3, .idProduct = 0x3306,
+		 .flags = SPEC_DEV_ID_DISABLE_HT},
 	{.idVendor = 0x13D3, .idProduct = 0x3311,
 		 .flags = SPEC_DEV_ID_DISABLE_HT},
+	{.idVendor = 0x13d3, .idProduct = 0x3335,
+		 .flags = SPEC_DEV_ID_DISABLE_HT},
+	{.idVendor = 0x13d3, .idProduct = 0x3336,
+		 .flags = SPEC_DEV_ID_DISABLE_HT},
+	{.idVendor = 0x13d3, .idProduct = 0x3340,
+		 .flags = SPEC_DEV_ID_DISABLE_HT},
+	{.idVendor = 0x13d3, .idProduct = 0x3341,
+		 .flags = SPEC_DEV_ID_DISABLE_HT},
 	{}
 };
 
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index 0bc113c..d007e4a 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -1044,9 +1044,9 @@
 
 	/* when doing suspend, call fb apis and pci apis */
 	if (msg.event == PM_EVENT_SUSPEND) {
-		acquire_console_sem();
+		console_lock();
 		fb_set_suspend(&sfb->fb, 1);
-		release_console_sem();
+		console_unlock();
 		retv = pci_save_state(pdev);
 		pci_disable_device(pdev);
 		retv = pci_choose_state(pdev, msg);
@@ -1105,9 +1105,9 @@
 
 	smtcfb_setmode(sfb);
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(&sfb->fb, 0);
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index 408bb9b..07a7f54 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -332,7 +332,7 @@
 	unsigned long flags;
 
 	len = strlen(buf);
-	if (len > 0 || len < 3) {
+	if (len > 0 && len < 3) {
 		ch = buf[0];
 		if (ch == '\n')
 			ch = '0';
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index e8f047e..80183a7 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -986,12 +986,6 @@
 	input_set_abs_params(rmi4_data->input_dev, ABS_MT_TOUCH_MAJOR, 0,
 						MAX_TOUCH_MAJOR, 0, 0);
 
-	retval = input_register_device(rmi4_data->input_dev);
-	if (retval) {
-		dev_err(&client->dev, "%s:input register failed\n", __func__);
-		goto err_input_register;
-	}
-
 	/* Clear interrupts */
 	synaptics_rmi4_i2c_block_read(rmi4_data,
 			rmi4_data->fn01_data_base_addr + 1, intr_status,
@@ -1003,15 +997,20 @@
 	if (retval) {
 		dev_err(&client->dev, "%s:Unable to get attn irq %d\n",
 				__func__, platformdata->irq_number);
-		goto err_request_irq;
+		goto err_unset_clientdata;
+	}
+
+	retval = input_register_device(rmi4_data->input_dev);
+	if (retval) {
+		dev_err(&client->dev, "%s:input register failed\n", __func__);
+		goto err_free_irq;
 	}
 
 	return retval;
 
-err_request_irq:
+err_free_irq:
 	free_irq(platformdata->irq_number, rmi4_data);
-	input_unregister_device(rmi4_data->input_dev);
-err_input_register:
+err_unset_clientdata:
 	i2c_set_clientdata(client, NULL);
 err_query_dev:
 	if (platformdata->regulator_en) {
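The reordering above makes the input device the last thing the probe sets up, so the error labels only undo work that actually happened and no events can be delivered before the IRQ is in place. A schematic of that shape -- the IRQ flags, handler and names here are assumptions for the sketch, not details of the ste_rmi4 driver:

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* placeholder for the real attention handler */
}

static int example_probe_tail(struct i2c_client *client,
			      struct input_dev *input)
{
	int ret;

	/* IRQ first, so the device is fully serviceable... */
	ret = request_threaded_irq(client->irq, NULL, example_irq_handler,
				   IRQF_ONESHOT, "example", client);
	if (ret)
		goto err_unset_clientdata;

	/* ...and only then expose it to userspace */
	ret = input_register_device(input);
	if (ret)
		goto err_free_irq;

	return 0;

err_free_irq:
	free_irq(client->irq, client);
err_unset_clientdata:
	i2c_set_clientdata(client, NULL);
	return ret;
}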
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 5718645..27e0aa8 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -949,7 +949,7 @@
  *      Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then
  *      schedules a DPC to dispatch I/O.
  */
-void io_mbox_msg(u32 msg)
+int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg)
 {
 	struct io_mgr *pio_mgr;
 	struct dev_object *dev_obj;
@@ -959,9 +959,9 @@
 	dev_get_io_mgr(dev_obj, &pio_mgr);
 
 	if (!pio_mgr)
-		return;
+		return NOTIFY_BAD;
 
-	pio_mgr->intr_val = (u16)msg;
+	pio_mgr->intr_val = (u16)((u32)msg);
 	if (pio_mgr->intr_val & MBX_PM_CLASS)
 		io_dispatch_pm(pio_mgr);
 
@@ -973,7 +973,7 @@
 		spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
 		tasklet_schedule(&pio_mgr->dpc_tasklet);
 	}
-	return;
+	return NOTIFY_OK;
 }
 
 /*
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index a3b0a18..a3f69f6 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -223,6 +223,10 @@
 	bridge_msg_set_queue_id,
 };
 
+static struct notifier_block dsp_mbox_notifier = {
+	.notifier_call = io_mbox_msg,
+};
+
 static inline void flush_all(struct bridge_dev_context *dev_context)
 {
 	if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
@@ -553,7 +557,7 @@
 		 * Enable Mailbox events and also drain any pending
 		 * stale messages.
 		 */
-		dev_context->mbox = omap_mbox_get("dsp");
+		dev_context->mbox = omap_mbox_get("dsp", &dsp_mbox_notifier);
 		if (IS_ERR(dev_context->mbox)) {
 			dev_context->mbox = NULL;
 			pr_err("%s: Failed to get dsp mailbox handle\n",
@@ -563,8 +567,6 @@
 
 	}
 	if (!status) {
-		dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;
-
 /*PM_IVA2GRPSEL_PER = 0xC0;*/
 		temp = readl(resources->dw_per_pm_base + 0xA8);
 		temp = (temp & 0xFFFFFF30) | 0xC0;
@@ -685,7 +687,7 @@
 	/* Disable the mailbox interrupts */
 	if (dev_context->mbox) {
 		omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
-		omap_mbox_put(dev_context->mbox);
+		omap_mbox_put(dev_context->mbox, &dsp_mbox_notifier);
 		dev_context->mbox = NULL;
 	}
 	/* Reset IVA2 clocks*/
@@ -786,10 +788,7 @@
 
 	pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
 	if (pt_attrs != NULL) {
-		/* Assuming that we use only DSP's memory map
-		 * until 0x4000:0000 , we would need only 1024
-		 * L1 enties i.e L1 size = 4K */
-		pt_attrs->l1_size = 0x1000;
+		pt_attrs->l1_size = SZ_16K; /* 4096 entries of 32 bits */
 		align_size = pt_attrs->l1_size;
 		/* Align sizes are expected to be power of 2 */
 		/* we like to get aligned on L1 table size */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
index 18aec55..8242c70 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/io_sm.h
@@ -72,22 +72,17 @@
 /*
  *  ======== io_mbox_msg ========
  *  Purpose:
- *      Main interrupt handler for the shared memory Bridge channel manager.
- *      Calls the Bridge's chnlsm_isr to determine if this interrupt is ours,
- *      then schedules a DPC to dispatch I/O.
+ *	Main message handler for the shared memory Bridge channel manager.
+ *	Determine if this message is ours, then schedules a DPC to
+ *	dispatch I/O.
  *  Parameters:
- *      ref_data:   Pointer to the channel manager object for this board.
- *                  Set in an initial call to ISR_Install().
+ *	self:	Pointer to its own notifier_block struct.
+ *	len:	Length of message.
+ *	msg:	Message code received.
  *  Returns:
- *      TRUE if interrupt handled; FALSE otherwise.
- *  Requires:
- *      Must be in locked memory if executing in kernel mode.
- *      Must only call functions which are in locked memory if Kernel mode.
- *      Must only call asynchronous services.
- *      Interrupts are disabled and EOI for this interrupt has been sent.
- *  Ensures:
+ *	NOTIFY_OK if handled; NOTIFY_BAD otherwise.
  */
-void io_mbox_msg(u32 msg);
+int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg);
 
 /*
  *  ======== io_request_chnl ========
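Taken together, the tiomap3430.c and io_sm.[ch] hunks replace the raw rxq callback pointer with a notifier_block handed to omap_mbox_get()/omap_mbox_put(). A generic sketch of that callback shape (names hypothetical; as in io_mbox_msg(), the payload arrives packed in the void * argument):

#include <linux/notifier.h>
#include <linux/types.h>

static int example_mbox_notify(struct notifier_block *self,
			       unsigned long len, void *msg)
{
	u32 val = (u32)(unsigned long)msg;	/* payload carried in the pointer */

	if (!val)
		return NOTIFY_BAD;

	/* ... hand val off to a tasklet/workqueue, as io_mbox_msg() does ... */
	return NOTIFY_OK;
}

static struct notifier_block example_mbox_nb = {
	.notifier_call = example_mbox_notify,
};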
diff --git a/drivers/staging/tm6000/tm6000-video.c b/drivers/staging/tm6000/tm6000-video.c
index 8fe017c..eb9b9f1 100644
--- a/drivers/staging/tm6000/tm6000-video.c
+++ b/drivers/staging/tm6000/tm6000-video.c
@@ -1450,29 +1450,55 @@
  * ------------------------------------------------------------------
  */
 
-int tm6000_v4l2_register(struct tm6000_core *dev)
+static struct video_device *vdev_init(struct tm6000_core *dev,
+		const struct video_device
+		*template, const char *type_name)
 {
-	int ret = -1;
 	struct video_device *vfd;
 
 	vfd = video_device_alloc();
-	if(!vfd) {
+	if (NULL == vfd)
+		return NULL;
+
+	*vfd = *template;
+	vfd->v4l2_dev = &dev->v4l2_dev;
+	vfd->release = video_device_release;
+	vfd->debug = tm6000_debug;
+	vfd->lock = &dev->lock;
+
+	snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name);
+
+	video_set_drvdata(vfd, dev);
+	return vfd;
+}
+
+int tm6000_v4l2_register(struct tm6000_core *dev)
+{
+	int ret = -1;
+
+	dev->vfd = vdev_init(dev, &tm6000_template, "video");
+
+	if (!dev->vfd) {
+		printk(KERN_INFO "%s: can't register video device\n",
+		       dev->name);
 		return -ENOMEM;
 	}
-	dev->vfd = vfd;
 
 	/* init video dma queues */
 	INIT_LIST_HEAD(&dev->vidq.active);
 	INIT_LIST_HEAD(&dev->vidq.queued);
 
-	memcpy(dev->vfd, &tm6000_template, sizeof(*(dev->vfd)));
-	dev->vfd->debug = tm6000_debug;
-	dev->vfd->lock = &dev->lock;
-
-	vfd->v4l2_dev = &dev->v4l2_dev;
-	video_set_drvdata(vfd, dev);
-
 	ret = video_register_device(dev->vfd, VFL_TYPE_GRABBER, video_nr);
+
+	if (ret < 0) {
+		printk(KERN_INFO "%s: can't register video device\n",
+		       dev->name);
+		return ret;
+	}
+
+	printk(KERN_INFO "%s: registered device %s\n",
+	       dev->name, video_device_node_name(dev->vfd));
+
 	printk(KERN_INFO "Trident TVMaster TM5600/TM6000/TM6010 USB2 board (Load status: %d)\n", ret);
 	return ret;
 }
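vdev_init() above follows the usual video_device bring-up: allocate, copy the template, wire up v4l2_dev/lock/release, stash drvdata, then register. A condensed sketch of the same sequence -- the mydev structure and function name are hypothetical:

#include <linux/mutex.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>

struct mydev {				/* stand-in for the driver's state */
	struct v4l2_device v4l2_dev;
	struct mutex lock;
};

static int example_register_node(struct mydev *dev,
				 const struct video_device *template)
{
	struct video_device *vfd = video_device_alloc();
	int ret;

	if (!vfd)
		return -ENOMEM;

	*vfd = *template;		/* copies fops/ioctl_ops/name */
	vfd->v4l2_dev = &dev->v4l2_dev;
	vfd->release  = video_device_release;
	vfd->lock     = &dev->lock;
	video_set_drvdata(vfd, dev);

	ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
	if (ret < 0)
		video_device_release(vfd);	/* never registered: free it */
	return ret;
}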
diff --git a/drivers/staging/usbip/stub.h b/drivers/staging/usbip/stub.h
index 30dbfb6..d732679 100644
--- a/drivers/staging/usbip/stub.h
+++ b/drivers/staging/usbip/stub.h
@@ -32,6 +32,7 @@
 
 struct stub_device {
 	struct usb_interface *interface;
+	struct usb_device *udev;
 	struct list_head list;
 
 	struct usbip_device ud;
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index b186b5f..a7ce51c 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -258,10 +258,11 @@
 static void stub_device_reset(struct usbip_device *ud)
 {
 	struct stub_device *sdev = container_of(ud, struct stub_device, ud);
-	struct usb_device *udev = interface_to_usbdev(sdev->interface);
+	struct usb_device *udev = sdev->udev;
 	int ret;
 
 	usbip_udbg("device reset");
+
 	ret = usb_lock_device_for_reset(udev, sdev->interface);
 	if (ret < 0) {
 		dev_err(&udev->dev, "lock for reset\n");
@@ -309,7 +310,8 @@
  *
  * Allocates and initializes a new stub_device struct.
  */
-static struct stub_device *stub_device_alloc(struct usb_interface *interface)
+static struct stub_device *stub_device_alloc(struct usb_device *udev,
+					     struct usb_interface *interface)
 {
 	struct stub_device *sdev;
 	int busnum = interface_to_busnum(interface);
@@ -324,7 +326,8 @@
 		return NULL;
 	}
 
-	sdev->interface = interface;
+	sdev->interface = usb_get_intf(interface);
+	sdev->udev = usb_get_dev(udev);
 
 	/*
 	 * devid is defined with devnum when this driver is first allocated.
@@ -450,11 +453,12 @@
 			return err;
 		}
 
+		usb_get_intf(interface);
 		return 0;
 	}
 
 	/* ok. this is my device. */
-	sdev = stub_device_alloc(interface);
+	sdev = stub_device_alloc(udev, interface);
 	if (!sdev)
 		return -ENOMEM;
 
@@ -476,6 +480,8 @@
 		dev_err(&interface->dev, "create sysfs files for %s\n",
 			udev_busid);
 		usb_set_intfdata(interface, NULL);
+		usb_put_intf(interface);
+
 		busid_priv->interf_count = 0;
 
 		busid_priv->sdev = NULL;
@@ -545,6 +551,7 @@
 	if (busid_priv->interf_count > 1) {
 		busid_priv->interf_count--;
 		shutdown_busid(busid_priv);
+		usb_put_intf(interface);
 		return;
 	}
 
@@ -554,6 +561,9 @@
 	/* 1. shutdown the current connection */
 	shutdown_busid(busid_priv);
 
+	usb_put_dev(sdev->udev);
+	usb_put_intf(interface);
+
 	/* 3. free sdev */
 	busid_priv->sdev = NULL;
 	stub_device_free(sdev);
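The stub_dev.c changes pin the usb_device and usb_interface for as long as the stub keeps pointers to them: references are taken at allocation/probe time and dropped at disconnect. A minimal sketch of that pairing (struct example_stub and the two helpers are hypothetical):

#include <linux/usb.h>

struct example_stub {
	struct usb_device *udev;
	struct usb_interface *interface;
};

static void example_bind(struct example_stub *s, struct usb_interface *intf)
{
	s->interface = usb_get_intf(intf);			/* +1 on the interface */
	s->udev = usb_get_dev(interface_to_usbdev(intf));	/* +1 on the device */
}

static void example_unbind(struct example_stub *s)
{
	/* drop exactly the references taken in example_bind() */
	usb_put_dev(s->udev);
	usb_put_intf(s->interface);
}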
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 3de6fd2..ae6ac82 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -364,7 +364,7 @@
 
 static int get_pipe(struct stub_device *sdev, int epnum, int dir)
 {
-	struct usb_device *udev = interface_to_usbdev(sdev->interface);
+	struct usb_device *udev = sdev->udev;
 	struct usb_host_endpoint *ep;
 	struct usb_endpoint_descriptor *epd = NULL;
 
@@ -484,7 +484,7 @@
 	int ret;
 	struct stub_priv *priv;
 	struct usbip_device *ud = &sdev->ud;
-	struct usb_device *udev = interface_to_usbdev(sdev->interface);
+	struct usb_device *udev = sdev->udev;
 	int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
 
 
diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
index 41a1fe5..afc3b1a 100644
--- a/drivers/staging/usbip/vhci.h
+++ b/drivers/staging/usbip/vhci.h
@@ -100,9 +100,6 @@
 	 * But, the index of this array begins from 0.
 	 */
 	struct vhci_device vdev[VHCI_NPORTS];
-
-	/* vhci_device which has not been assiged its address yet */
-	int pending_port;
 };
 
 
@@ -119,6 +116,9 @@
 void vhci_rx_loop(struct usbip_task *ut);
 void vhci_tx_loop(struct usbip_task *ut);
 
+struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
+					    __u32 seqnum);
+
 #define hardware		(&the_controller->pdev.dev)
 
 static inline struct vhci_device *port_to_vdev(__u32 port)
diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
index 08bd26a..a35fe61 100644
--- a/drivers/staging/usbip/vhci_hcd.c
+++ b/drivers/staging/usbip/vhci_hcd.c
@@ -138,8 +138,6 @@
 	 * the_controller->vdev[rhport].ud.status = VDEV_CONNECT;
 	 * spin_unlock(&the_controller->vdev[rhport].ud.lock); */
 
-	the_controller->pending_port = rhport;
-
 	spin_unlock_irqrestore(&the_controller->lock, flags);
 
 	usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
@@ -559,6 +557,7 @@
 	struct device *dev = &urb->dev->dev;
 	int ret = 0;
 	unsigned long flags;
+	struct vhci_device *vdev;
 
 	usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
 		    hcd, urb, mem_flags);
@@ -574,6 +573,18 @@
 		return urb->status;
 	}
 
+	vdev = port_to_vdev(urb->dev->portnum-1);
+
+	/* refuse enqueue for dead connection */
+	spin_lock(&vdev->ud.lock);
+	if (vdev->ud.status == VDEV_ST_NULL || vdev->ud.status == VDEV_ST_ERROR) {
+		usbip_uerr("enqueue for inactive port %d\n", vdev->rhport);
+		spin_unlock(&vdev->ud.lock);
+		spin_unlock_irqrestore(&the_controller->lock, flags);
+		return -ENODEV;
+	}
+	spin_unlock(&vdev->ud.lock);
+
 	ret = usb_hcd_link_urb_to_ep(hcd, urb);
 	if (ret)
 		goto no_need_unlink;
@@ -592,8 +603,6 @@
 		__u8 type = usb_pipetype(urb->pipe);
 		struct usb_ctrlrequest *ctrlreq =
 				(struct usb_ctrlrequest *) urb->setup_packet;
-		struct vhci_device *vdev =
-				port_to_vdev(the_controller->pending_port);
 
 		if (type != PIPE_CONTROL || !ctrlreq) {
 			dev_err(dev, "invalid request to devnum 0\n");
@@ -607,7 +616,9 @@
 			dev_info(dev, "SetAddress Request (%d) to port %d\n",
 				 ctrlreq->wValue, vdev->rhport);
 
-			vdev->udev = urb->dev;
+			if (vdev->udev)
+				usb_put_dev(vdev->udev);
+			vdev->udev = usb_get_dev(urb->dev);
 
 			spin_lock(&vdev->ud.lock);
 			vdev->ud.status = VDEV_ST_USED;
@@ -627,8 +638,9 @@
 						"Get_Descriptor to device 0 "
 						"(get max pipe size)\n");
 
-			/* FIXME: reference count? (usb_get_dev()) */
-			vdev->udev = urb->dev;
+			if (vdev->udev)
+				usb_put_dev(vdev->udev);
+			vdev->udev = usb_get_dev(urb->dev);
 			goto out;
 
 		default:
@@ -805,7 +817,6 @@
 	return 0;
 }
 
-
 static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
 {
 	struct vhci_unlink *unlink, *tmp;
@@ -813,11 +824,34 @@
 	spin_lock(&vdev->priv_lock);
 
 	list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
+		usbip_uinfo("unlink cleanup tx %lu\n", unlink->unlink_seqnum);
 		list_del(&unlink->list);
 		kfree(unlink);
 	}
 
 	list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
+		struct urb *urb;
+
+		/* give back URB of unanswered unlink request */
+		usbip_uinfo("unlink cleanup rx %lu\n", unlink->unlink_seqnum);
+
+		urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
+		if (!urb) {
+			usbip_uinfo("the urb (seqnum %lu) was already given back\n",
+							unlink->unlink_seqnum);
+			list_del(&unlink->list);
+			kfree(unlink);
+			continue;
+		}
+
+		urb->status = -ENODEV;
+
+		spin_lock(&the_controller->lock);
+		usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
+		spin_unlock(&the_controller->lock);
+
+		usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
+
 		list_del(&unlink->list);
 		kfree(unlink);
 	}
@@ -887,6 +921,10 @@
 	vdev->speed  = 0;
 	vdev->devid  = 0;
 
+	if (vdev->udev)
+		usb_put_dev(vdev->udev);
+	vdev->udev = NULL;
+
 	ud->tcp_socket = NULL;
 
 	ud->status = VDEV_ST_NULL;
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 8147d72..bf69914 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -23,16 +23,14 @@
 #include "vhci.h"
 
 
-/* get URB from transmitted urb queue */
-static struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
+/* get URB from transmitted urb queue. caller must hold vdev->priv_lock */
+struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev,
 					    __u32 seqnum)
 {
 	struct vhci_priv *priv, *tmp;
 	struct urb *urb = NULL;
 	int status;
 
-	spin_lock(&vdev->priv_lock);
-
 	list_for_each_entry_safe(priv, tmp, &vdev->priv_rx, list) {
 		if (priv->seqnum == seqnum) {
 			urb = priv->urb;
@@ -63,8 +61,6 @@
 		}
 	}
 
-	spin_unlock(&vdev->priv_lock);
-
 	return urb;
 }
 
@@ -74,9 +70,11 @@
 	struct usbip_device *ud = &vdev->ud;
 	struct urb *urb;
 
+	spin_lock(&vdev->priv_lock);
 
 	urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
 
+	spin_unlock(&vdev->priv_lock);
 
 	if (!urb) {
 		usbip_uerr("cannot find a urb of seqnum %u\n",
@@ -161,7 +159,12 @@
 		return;
 	}
 
+	spin_lock(&vdev->priv_lock);
+
 	urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
+
+	spin_unlock(&vdev->priv_lock);
+
 	if (!urb) {
 		/*
 		 * I get the result of a unlink request. But, it seems that I
@@ -190,6 +193,19 @@
 	return;
 }
 
+static int vhci_priv_tx_empty(struct vhci_device *vdev)
+{
+	int empty = 0;
+
+	spin_lock(&vdev->priv_lock);
+
+	empty = list_empty(&vdev->priv_rx);
+
+	spin_unlock(&vdev->priv_lock);
+
+	return empty;
+}
+
 /* recv a pdu */
 static void vhci_rx_pdu(struct usbip_device *ud)
 {
@@ -202,11 +218,29 @@
 
 	memset(&pdu, 0, sizeof(pdu));
 
-
 	/* 1. receive a pdu header */
 	ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0);
+	if (ret < 0) {
+		if (ret == -ECONNRESET)
+			usbip_uinfo("connection reset by peer\n");
+		else if (ret == -EAGAIN) {
+			/* ignore if connection was idle */
+			if (vhci_priv_tx_empty(vdev))
+				return;
+			usbip_uinfo("connection timed out with pending urbs\n");
+		} else if (ret != -ERESTARTSYS)
+			usbip_uinfo("xmit failed %d\n", ret);
+
+		usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+		return;
+	}
+	if (ret == 0) {
+		usbip_uinfo("connection closed\n");
+		usbip_event_add(ud, VDEV_EVENT_DOWN);
+		return;
+	}
 	if (ret != sizeof(pdu)) {
-		usbip_uerr("receiving pdu failed! size is %d, should be %d\n",
+		usbip_uerr("received pdu size is %d, should be %d\n",
 					ret, (unsigned int)sizeof(pdu));
 		usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
 		return;
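With the locking moved out of pickup_urb_and_free_priv(), every caller is now responsible for holding vdev->priv_lock across the lookup, which is what lets vhci_device_unlink_cleanup() reuse it while already walking the unlink list under that lock. A caller-side sketch of the new contract (wrapper name hypothetical; types as declared in the driver's vhci.h):

#include "vhci.h"	/* struct vhci_device, pickup_urb_and_free_priv() */

static struct urb *example_pickup_locked(struct vhci_device *vdev, __u32 seqnum)
{
	struct urb *urb;

	spin_lock(&vdev->priv_lock);
	urb = pickup_urb_and_free_priv(vdev, seqnum);
	spin_unlock(&vdev->priv_lock);

	return urb;
}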
diff --git a/drivers/staging/vme/bridges/Module.symvers b/drivers/staging/vme/bridges/Module.symvers
deleted file mode 100644
index e69de29..0000000
--- a/drivers/staging/vme/bridges/Module.symvers
+++ /dev/null
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 7016fdd..e19b932 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -3954,8 +3954,8 @@
 unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
 {
 
-	if ((((pVBInfo->VBInfo & SetCRT2ToLCD) | SetCRT2ToLCDA))
-			&& (pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
+	if ((pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) &&
+			(pVBInfo->LCDInfo & SetLCDDualLink)) /* shampoo0129 */
 		return 1;
 
 	return 0;
@@ -8773,7 +8773,7 @@
 
 	if (pVBInfo->IF_DEF_LVDS == 0) {
 		CRT2Index = CRT2Index >> 6; /*  for LCD */
-		if (((pVBInfo->VBInfo & SetCRT2ToLCD) | SetCRT2ToLCDA)) { /*301b*/
+		if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { /*301b*/
 			if (pVBInfo->LCDResInfo != Panel1024x768)
 				VCLKIndex = LCDXlat2VCLK[CRT2Index];
 			else
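Both xgifb hunks fix the same mis-grouped condition: ORing the SetCRT2ToLCDA constant into the result of the mask makes the test always true whenever that flag value is nonzero, whereas the intent is to test VBInfo against either flag. A tiny self-contained illustration with made-up flag values:

#define FLAG_A	0x0001
#define FLAG_B	0x0002

/* Always true (FLAG_B is a nonzero constant) -- the bug being removed: */
static int buggy(unsigned int x) { return ((x & FLAG_A) | FLAG_B) != 0; }

/* True only when x carries FLAG_A or FLAG_B -- the intended test: */
static int fixed(unsigned int x) { return (x & (FLAG_A | FLAG_B)) != 0; }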
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 5415712..4bd8cbd 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -227,6 +227,7 @@
 
 		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
 			handle_zero_page(page);
+			index++;
 			continue;
 		}
 
@@ -235,12 +236,14 @@
 			pr_debug("Read before write: sector=%lu, size=%u",
 				(ulong)(bio->bi_sector), bio->bi_size);
 			/* Do nothing */
+			index++;
 			continue;
 		}
 
 		/* Page is stored uncompressed since it's incompressible */
 		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
 			handle_uncompressed_page(zram, page, index);
+			index++;
 			continue;
 		}
 
@@ -320,6 +323,7 @@
 			mutex_unlock(&zram->lock);
 			zram_stat_inc(&zram->stats.pages_zero);
 			zram_set_flag(zram, index, ZRAM_ZERO);
+			index++;
 			continue;
 		}
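The three index++ additions fix one slip repeated in zram's per-segment loops: the page index is advanced manually at the bottom of the body, so every early continue has to advance it as well or the loop keeps handling the same page. A stripped-down sketch of the bookkeeping (the per-page helpers are placeholder stubs, not zram code):

#include <linux/types.h>

static bool example_is_zero_page(u32 index) { return !(index % 2); }
static void example_handle_zero(u32 index) { }
static void example_handle_data(u32 index) { }

static void example_walk(u32 index, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (example_is_zero_page(index)) {
			example_handle_zero(index);
			index++;	/* the fix: advance before continue */
			continue;
		}
		example_handle_data(index);
		index++;
	}
}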
 
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 5cfd708..973bb19 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -13,8 +13,7 @@
 				   target_core_transport.o \
 				   target_core_cdb.o \
 				   target_core_ua.o \
-				   target_core_rd.o \
-				   target_core_mib.o
+				   target_core_rd.o
 
 obj-$(CONFIG_TARGET_CORE)	+= target_core_mod.o
 
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 2764510..caf8dc1 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -37,7 +37,6 @@
 #include <linux/parser.h>
 #include <linux/syscalls.h>
 #include <linux/configfs.h>
-#include <linux/proc_fs.h>
 
 #include <target/target_core_base.h>
 #include <target/target_core_device.h>
@@ -1971,13 +1970,35 @@
 {
 	struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
 				struct se_subsystem_dev, se_dev_group);
-	struct config_group *dev_cg;
+	struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
+	struct se_subsystem_api *t = hba->transport;
+	struct config_group *dev_cg = &se_dev->se_dev_group;
 
-	if (!(se_dev))
-		return;
-
-	dev_cg = &se_dev->se_dev_group;
 	kfree(dev_cg->default_groups);
+	/*
+	 * This pointer will be set when the storage is enabled with:
+	 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
+	 */
+	if (se_dev->se_dev_ptr) {
+		printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
+			"virtual_device() for se_dev_ptr: %p\n",
+			se_dev->se_dev_ptr);
+
+		se_free_virtual_device(se_dev->se_dev_ptr, hba);
+	} else {
+		/*
+		 * Release struct se_subsystem_dev->se_dev_su_ptr..
+		 */
+		printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
+			"device() for se_dev_su_ptr: %p\n",
+			se_dev->se_dev_su_ptr);
+
+		t->free_device(se_dev->se_dev_su_ptr);
+	}
+
+	printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
+			"_dev_t: %p\n", se_dev);
+	kfree(se_dev);
 }
 
 static ssize_t target_core_dev_show(struct config_item *item,
@@ -2140,7 +2161,16 @@
 	NULL,
 };
 
+static void target_core_alua_lu_gp_release(struct config_item *item)
+{
+	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
+			struct t10_alua_lu_gp, lu_gp_group);
+
+	core_alua_free_lu_gp(lu_gp);
+}
+
 static struct configfs_item_operations target_core_alua_lu_gp_ops = {
+	.release		= target_core_alua_lu_gp_release,
 	.show_attribute		= target_core_alua_lu_gp_attr_show,
 	.store_attribute	= target_core_alua_lu_gp_attr_store,
 };
@@ -2191,9 +2221,11 @@
 	printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
 		" Group: core/alua/lu_gps/%s, ID: %hu\n",
 		config_item_name(item), lu_gp->lu_gp_id);
-
+	/*
+	 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
+	 * -> target_core_alua_lu_gp_release()
+	 */
 	config_item_put(item);
-	core_alua_free_lu_gp(lu_gp);
 }
 
 static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
@@ -2549,7 +2581,16 @@
 	NULL,
 };
 
+static void target_core_alua_tg_pt_gp_release(struct config_item *item)
+{
+	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
+			struct t10_alua_tg_pt_gp, tg_pt_gp_group);
+
+	core_alua_free_tg_pt_gp(tg_pt_gp);
+}
+
 static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
+	.release		= target_core_alua_tg_pt_gp_release,
 	.show_attribute		= target_core_alua_tg_pt_gp_attr_show,
 	.store_attribute	= target_core_alua_tg_pt_gp_attr_store,
 };
@@ -2602,9 +2643,11 @@
 	printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
 		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
 		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
-
+	/*
+	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
+	 * -> target_core_alua_tg_pt_gp_release().
+	 */
 	config_item_put(item);
-	core_alua_free_tg_pt_gp(tg_pt_gp);
 }
 
 static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
@@ -2771,13 +2814,11 @@
 	struct se_subsystem_api *t;
 	struct config_item *df_item;
 	struct config_group *dev_cg, *tg_pt_gp_cg;
-	int i, ret;
+	int i;
 
 	hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
 
-	if (mutex_lock_interruptible(&hba->hba_access_mutex))
-		goto out;
-
+	mutex_lock(&hba->hba_access_mutex);
 	t = hba->transport;
 
 	spin_lock(&se_global->g_device_lock);
@@ -2791,7 +2832,10 @@
 		config_item_put(df_item);
 	}
 	kfree(tg_pt_gp_cg->default_groups);
-	core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
+	/*
+	 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
+	 * directly from target_core_alua_tg_pt_gp_release().
+	 */
 	T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
 
 	dev_cg = &se_dev->se_dev_group;
@@ -2800,38 +2844,12 @@
 		dev_cg->default_groups[i] = NULL;
 		config_item_put(df_item);
 	}
-
-	config_item_put(item);
 	/*
-	 * This pointer will set when the storage is enabled with:
-	 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
+	 * The releasing of se_dev and associated se_dev->se_dev_ptr is done
+	 * from target_core_dev_item_ops->release() ->target_core_dev_release().
 	 */
-	if (se_dev->se_dev_ptr) {
-		printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
-			"virtual_device() for se_dev_ptr: %p\n",
-				se_dev->se_dev_ptr);
-
-		ret = se_free_virtual_device(se_dev->se_dev_ptr, hba);
-		if (ret < 0)
-			goto hba_out;
-	} else {
-		/*
-		 * Release struct se_subsystem_dev->se_dev_su_ptr..
-		 */
-		printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
-			"device() for se_dev_su_ptr: %p\n",
-			se_dev->se_dev_su_ptr);
-
-		t->free_device(se_dev->se_dev_su_ptr);
-	}
-
-	printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
-		"_dev_t: %p\n", se_dev);
-
-hba_out:
+	config_item_put(item);
 	mutex_unlock(&hba->hba_access_mutex);
-out:
-	kfree(se_dev);
 }
 
 static struct configfs_group_operations target_core_hba_group_ops = {
@@ -2914,6 +2932,13 @@
 
 CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
 
+static void target_core_hba_release(struct config_item *item)
+{
+	struct se_hba *hba = container_of(to_config_group(item),
+				struct se_hba, hba_group);
+	core_delete_hba(hba);
+}
+
 static struct configfs_attribute *target_core_hba_attrs[] = {
 	&target_core_hba_hba_info.attr,
 	&target_core_hba_hba_mode.attr,
@@ -2921,6 +2946,7 @@
 };
 
 static struct configfs_item_operations target_core_hba_item_ops = {
+	.release		= target_core_hba_release,
 	.show_attribute		= target_core_hba_attr_show,
 	.store_attribute	= target_core_hba_attr_store,
 };
@@ -2997,10 +3023,11 @@
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct se_hba *hba = item_to_hba(item);
-
+	/*
+	 * core_delete_hba() is called from target_core_hba_item_ops->release()
+	 * -> target_core_hba_release()
+	 */
 	config_item_put(item);
-	core_delete_hba(hba);
 }
 
 static struct configfs_group_operations target_core_group_ops = {
@@ -3022,7 +3049,6 @@
 	struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
 	struct config_group *lu_gp_cg = NULL;
 	struct configfs_subsystem *subsys;
-	struct proc_dir_entry *scsi_target_proc = NULL;
 	struct t10_alua_lu_gp *lu_gp;
 	int ret;
 
@@ -3128,21 +3154,10 @@
 	if (core_dev_setup_virtual_lun0() < 0)
 		goto out;
 
-	scsi_target_proc = proc_mkdir("scsi_target", 0);
-	if (!(scsi_target_proc)) {
-		printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n");
-		goto out;
-	}
-	ret = init_scsi_target_mib();
-	if (ret < 0)
-		goto out;
-
 	return 0;
 
 out:
 	configfs_unregister_subsystem(subsys);
-	if (scsi_target_proc)
-		remove_proc_entry("scsi_target", 0);
 	core_dev_release_virtual_lun0();
 	rd_module_exit();
 out_global:
@@ -3178,8 +3193,7 @@
 		config_item_put(item);
 	}
 	kfree(lu_gp_cg->default_groups);
-	core_alua_free_lu_gp(se_global->default_lu_gp);
-	se_global->default_lu_gp = NULL;
+	lu_gp_cg->default_groups = NULL;
 
 	alua_cg = &se_global->alua_group;
 	for (i = 0; alua_cg->default_groups[i]; i++) {
@@ -3188,6 +3202,7 @@
 		config_item_put(item);
 	}
 	kfree(alua_cg->default_groups);
+	alua_cg->default_groups = NULL;
 
 	hba_cg = &se_global->target_core_hbagroup;
 	for (i = 0; hba_cg->default_groups[i]; i++) {
@@ -3196,20 +3211,20 @@
 		config_item_put(item);
 	}
 	kfree(hba_cg->default_groups);
-
-	for (i = 0; subsys->su_group.default_groups[i]; i++) {
-		item = &subsys->su_group.default_groups[i]->cg_item;
-		subsys->su_group.default_groups[i] = NULL;
-		config_item_put(item);
-	}
+	hba_cg->default_groups = NULL;
+	/*
+	 * We expect subsys->su_group.default_groups to be released
+	 * by configfs subsystem provider logic..
+	 */
+	configfs_unregister_subsystem(subsys);
 	kfree(subsys->su_group.default_groups);
 
-	configfs_unregister_subsystem(subsys);
+	core_alua_free_lu_gp(se_global->default_lu_gp);
+	se_global->default_lu_gp = NULL;
+
 	printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
 			" Infrastructure\n");
 
-	remove_scsi_target_mib();
-	remove_proc_entry("scsi_target", 0);
 	core_dev_release_virtual_lun0();
 	rd_module_exit();
 	release_se_global();
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 317ce58..5da051a 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -373,11 +373,11 @@
 		/*
 		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
 		 * that have not been explicitly converted to MappedLUNs ->
-		 * struct se_lun_acl.
+		 * struct se_lun_acl, but we remove deve->alua_port_list from
+		 * port->sep_alua_list. This also means that active UAs and
+		 * NodeACL context specific PR metadata for demo-mode
+		 * MappedLUN *deve will be released below..
 		 */
-		if (!(deve->se_lun_acl))
-			return 0;
-
 		spin_lock_bh(&port->sep_alua_lock);
 		list_del(&deve->alua_port_list);
 		spin_unlock_bh(&port->sep_alua_lock);
@@ -395,12 +395,14 @@
 				printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
 					" already set for demo mode -> explict"
 					" LUN ACL transition\n");
+				spin_unlock_irq(&nacl->device_list_lock);
 				return -1;
 			}
 			if (deve->se_lun != lun) {
 				printk(KERN_ERR "struct se_dev_entry->se_lun does"
 					" match passed struct se_lun for demo mode"
 					" -> explict LUN ACL transition\n");
+				spin_unlock_irq(&nacl->device_list_lock);
 				return -1;
 			}
 			deve->se_lun_acl = lun_acl;
@@ -865,9 +867,6 @@
 		}
 	}
 	spin_unlock(&hba->device_lock);
-
-	while (atomic_read(&hba->dev_mib_access_count))
-		cpu_relax();
 }
 
 int se_dev_check_online(struct se_device *dev)
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 32b148d..b65d1c8 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -214,12 +214,22 @@
 
 CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
 
+static void target_fabric_mappedlun_release(struct config_item *item)
+{
+	struct se_lun_acl *lacl = container_of(to_config_group(item),
+				struct se_lun_acl, se_lun_group);
+	struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
+
+	core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
 static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
 	&target_fabric_mappedlun_write_protect.attr,
 	NULL,
 };
 
 static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
+	.release		= target_fabric_mappedlun_release,
 	.show_attribute		= target_fabric_mappedlun_attr_show,
 	.store_attribute	= target_fabric_mappedlun_attr_store,
 	.allow_link		= target_fabric_mappedlun_link,
@@ -337,15 +347,21 @@
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct se_lun_acl *lacl = container_of(to_config_group(item),
-			struct se_lun_acl, se_lun_group);
-	struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
-
 	config_item_put(item);
-	core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
+}
+
+static void target_fabric_nacl_base_release(struct config_item *item)
+{
+	struct se_node_acl *se_nacl = container_of(to_config_group(item),
+			struct se_node_acl, acl_group);
+	struct se_portal_group *se_tpg = se_nacl->se_tpg;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_nodeacl(se_nacl);
 }
 
 static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
+	.release		= target_fabric_nacl_base_release,
 	.show_attribute		= target_fabric_nacl_base_attr_show,
 	.store_attribute	= target_fabric_nacl_base_attr_store,
 };
@@ -404,9 +420,6 @@
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct se_portal_group *se_tpg = container_of(group,
-			struct se_portal_group, tpg_acl_group);
-	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
 	struct se_node_acl *se_nacl = container_of(to_config_group(item),
 			struct se_node_acl, acl_group);
 	struct config_item *df_item;
@@ -419,9 +432,10 @@
 		nacl_cg->default_groups[i] = NULL;
 		config_item_put(df_item);
 	}
-
+	/*
+	 * struct se_node_acl free is done in target_fabric_nacl_base_release()
+	 */
 	config_item_put(item);
-	tf->tf_ops.fabric_drop_nodeacl(se_nacl);
 }
 
 static struct configfs_group_operations target_fabric_nacl_group_ops = {
@@ -437,7 +451,18 @@
 
 CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
 
+static void target_fabric_np_base_release(struct config_item *item)
+{
+	struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
+				struct se_tpg_np, tpg_np_group);
+	struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
+	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_np(se_tpg_np);
+}
+
 static struct configfs_item_operations target_fabric_np_base_item_ops = {
+	.release		= target_fabric_np_base_release,
 	.show_attribute		= target_fabric_np_base_attr_show,
 	.store_attribute	= target_fabric_np_base_attr_store,
 };
@@ -466,6 +491,7 @@
 	if (!(se_tpg_np) || IS_ERR(se_tpg_np))
 		return ERR_PTR(-EINVAL);
 
+	se_tpg_np->tpg_np_parent = se_tpg;
 	config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
 			&TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
 
@@ -476,14 +502,10 @@
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct se_portal_group *se_tpg = container_of(group,
-				struct se_portal_group, tpg_np_group);
-	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
-	struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
-				struct se_tpg_np, tpg_np_group);
-
+	/*
+	 * struct se_tpg_np is released via target_fabric_np_base_release()
+	 */
 	config_item_put(item);
-	tf->tf_ops.fabric_drop_np(se_tpg_np);
 }
 
 static struct configfs_group_operations target_fabric_np_group_ops = {
@@ -814,7 +836,18 @@
  */
 CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
 
+static void target_fabric_tpg_release(struct config_item *item)
+{
+	struct se_portal_group *se_tpg = container_of(to_config_group(item),
+			struct se_portal_group, tpg_group);
+	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_tpg(se_tpg);
+}
+
 static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
+	.release		= target_fabric_tpg_release,
 	.show_attribute		= target_fabric_tpg_attr_show,
 	.store_attribute	= target_fabric_tpg_attr_store,
 };
@@ -872,8 +905,6 @@
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
-	struct target_fabric_configfs *tf = wwn->wwn_tf;
 	struct se_portal_group *se_tpg = container_of(to_config_group(item),
 				struct se_portal_group, tpg_group);
 	struct config_group *tpg_cg = &se_tpg->tpg_group;
@@ -890,15 +921,28 @@
 	}
 
 	config_item_put(item);
-	tf->tf_ops.fabric_drop_tpg(se_tpg);
 }
 
+static void target_fabric_release_wwn(struct config_item *item)
+{
+	struct se_wwn *wwn = container_of(to_config_group(item),
+				struct se_wwn, wwn_group);
+	struct target_fabric_configfs *tf = wwn->wwn_tf;
+
+	tf->tf_ops.fabric_drop_wwn(wwn);
+}
+
+static struct configfs_item_operations target_fabric_tpg_item_ops = {
+	.release	= target_fabric_release_wwn,
+};
+
 static struct configfs_group_operations target_fabric_tpg_group_ops = {
 	.make_group	= target_fabric_make_tpg,
 	.drop_item	= target_fabric_drop_tpg,
 };
 
-TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL);
+TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
+		NULL);
 
 /* End of tfc_tpg_cit */
 
@@ -932,13 +976,7 @@
 	struct config_group *group,
 	struct config_item *item)
 {
-	struct target_fabric_configfs *tf = container_of(group,
-				struct target_fabric_configfs, tf_group);
-	struct se_wwn *wwn = container_of(to_config_group(item),
-				struct se_wwn, wwn_group);
-
 	config_item_put(item);
-	tf->tf_ops.fabric_drop_wwn(wwn);
 }
 
 static struct configfs_group_operations target_fabric_wwn_group_ops = {
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c6e0d75..67f0c09 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -154,7 +154,7 @@
 
 	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
 				FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
-	if (!(bd))
+	if (IS_ERR(bd))
 		goto failed;
 	/*
 	 * Setup the local scope queue_limits from struct request_queue->limits
@@ -220,8 +220,10 @@
 {
 	struct iblock_dev *ib_dev = p;
 
-	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
-	bioset_free(ib_dev->ibd_bio_set);
+	if (ib_dev->ibd_bd != NULL)
+		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
+	if (ib_dev->ibd_bio_set != NULL)
+		bioset_free(ib_dev->ibd_bio_set);
 	kfree(ib_dev);
 }
 
diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c
deleted file mode 100644
index d5a48aa..0000000
--- a/drivers/target/target_core_mib.c
+++ /dev/null
@@ -1,1078 +0,0 @@
-/*******************************************************************************
- * Filename:  target_core_mib.c
- *
- * Copyright (c) 2006-2007 SBE, Inc.  All Rights Reserved.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
- *
- * Nicholas A. Bellinger <nab@linux-iscsi.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- ******************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/string.h>
-#include <linux/version.h>
-#include <generated/utsrelease.h>
-#include <linux/utsname.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/blkdev.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-
-#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_configfs.h>
-
-#include "target_core_hba.h"
-#include "target_core_mib.h"
-
-/* SCSI mib table index */
-static struct scsi_index_table scsi_index_table;
-
-#ifndef INITIAL_JIFFIES
-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
-#endif
-
-/* SCSI Instance Table */
-#define SCSI_INST_SW_INDEX		1
-#define SCSI_TRANSPORT_INDEX		1
-
-#define NONE		"None"
-#define ISPRINT(a)   ((a >= ' ') && (a <= '~'))
-
-static inline int list_is_first(const struct list_head *list,
-				const struct list_head *head)
-{
-	return list->prev == head;
-}
-
-static void *locate_hba_start(
-	struct seq_file *seq,
-	loff_t *pos)
-{
-	spin_lock(&se_global->g_device_lock);
-	return seq_list_start(&se_global->g_se_dev_list, *pos);
-}
-
-static void *locate_hba_next(
-	struct seq_file *seq,
-	void *v,
-	loff_t *pos)
-{
-	return seq_list_next(v, &se_global->g_se_dev_list, pos);
-}
-
-static void locate_hba_stop(struct seq_file *seq, void *v)
-{
-	spin_unlock(&se_global->g_device_lock);
-}
-
-/****************************************************************************
- * SCSI MIB Tables
- ****************************************************************************/
-
-/*
- * SCSI Instance Table
- */
-static void *scsi_inst_seq_start(
-	struct seq_file *seq,
-	loff_t *pos)
-{
-	spin_lock(&se_global->hba_lock);
-	return seq_list_start(&se_global->g_hba_list, *pos);
-}
-
-static void *scsi_inst_seq_next(
-	struct seq_file *seq,
-	void *v,
-	loff_t *pos)
-{
-	return seq_list_next(v, &se_global->g_hba_list, pos);
-}
-
-static void scsi_inst_seq_stop(struct seq_file *seq, void *v)
-{
-	spin_unlock(&se_global->hba_lock);
-}
-
-static int scsi_inst_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba = list_entry(v, struct se_hba, hba_list);
-
-	if (list_is_first(&hba->hba_list, &se_global->g_hba_list))
-		seq_puts(seq, "inst sw_indx\n");
-
-	seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX);
-	seq_printf(seq, "plugin: %s version: %s\n",
-			hba->transport->name, TARGET_CORE_VERSION);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_inst_seq_ops = {
-	.start	= scsi_inst_seq_start,
-	.next	= scsi_inst_seq_next,
-	.stop	= scsi_inst_seq_stop,
-	.show	= scsi_inst_seq_show
-};
-
-static int scsi_inst_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_inst_seq_ops);
-}
-
-static const struct file_operations scsi_inst_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_inst_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Device Table
- */
-static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	return locate_hba_start(seq, pos);
-}
-
-static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_dev_seq_stop(struct seq_file *seq, void *v)
-{
-	locate_hba_stop(seq, v);
-}
-
-static int scsi_dev_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba;
-	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-						g_se_dev_list);
-	struct se_device *dev = se_dev->se_dev_ptr;
-	char str[28];
-	int k;
-
-	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-		seq_puts(seq, "inst indx role ports\n");
-
-	if (!(dev))
-		return 0;
-
-	hba = dev->se_hba;
-	if (!(hba)) {
-		/* Log error ? */
-		return 0;
-	}
-
-	seq_printf(seq, "%u %u %s %u\n", hba->hba_index,
-		   dev->dev_index, "Target", dev->dev_port_count);
-
-	memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
-
-	/* vendor */
-	for (k = 0; k < 8; k++)
-		str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ?
-				DEV_T10_WWN(dev)->vendor[k] : 0x20;
-	str[k] = 0x20;
-
-	/* model */
-	for (k = 0; k < 16; k++)
-		str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ?
-				DEV_T10_WWN(dev)->model[k] : 0x20;
-	str[k + 9] = 0;
-
-	seq_printf(seq, "dev_alias: %s\n", str);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_dev_seq_ops = {
-	.start  = scsi_dev_seq_start,
-	.next   = scsi_dev_seq_next,
-	.stop   = scsi_dev_seq_stop,
-	.show   = scsi_dev_seq_show
-};
-
-static int scsi_dev_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_dev_seq_ops);
-}
-
-static const struct file_operations scsi_dev_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_dev_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Port Table
- */
-static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	return locate_hba_start(seq, pos);
-}
-
-static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_port_seq_stop(struct seq_file *seq, void *v)
-{
-	locate_hba_stop(seq, v);
-}
-
-static int scsi_port_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba;
-	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-						g_se_dev_list);
-	struct se_device *dev = se_dev->se_dev_ptr;
-	struct se_port *sep, *sep_tmp;
-
-	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-		seq_puts(seq, "inst device indx role busy_count\n");
-
-	if (!(dev))
-		return 0;
-
-	hba = dev->se_hba;
-	if (!(hba)) {
-		/* Log error ? */
-		return 0;
-	}
-
-	/* FIXME: scsiPortBusyStatuses count */
-	spin_lock(&dev->se_port_lock);
-	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
-		seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index,
-			dev->dev_index, sep->sep_index, "Device",
-			dev->dev_index, 0);
-	}
-	spin_unlock(&dev->se_port_lock);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_port_seq_ops = {
-	.start  = scsi_port_seq_start,
-	.next   = scsi_port_seq_next,
-	.stop   = scsi_port_seq_stop,
-	.show   = scsi_port_seq_show
-};
-
-static int scsi_port_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_port_seq_ops);
-}
-
-static const struct file_operations scsi_port_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_port_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Transport Table
- */
-static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	return locate_hba_start(seq, pos);
-}
-
-static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_transport_seq_stop(struct seq_file *seq, void *v)
-{
-	locate_hba_stop(seq, v);
-}
-
-static int scsi_transport_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba;
-	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-						g_se_dev_list);
-	struct se_device *dev = se_dev->se_dev_ptr;
-	struct se_port *se, *se_tmp;
-	struct se_portal_group *tpg;
-	struct t10_wwn *wwn;
-	char buf[64];
-
-	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-		seq_puts(seq, "inst device indx dev_name\n");
-
-	if (!(dev))
-		return 0;
-
-	hba = dev->se_hba;
-	if (!(hba)) {
-		/* Log error ? */
-		return 0;
-	}
-
-	wwn = DEV_T10_WWN(dev);
-
-	spin_lock(&dev->se_port_lock);
-	list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) {
-		tpg = se->sep_tpg;
-		sprintf(buf, "scsiTransport%s",
-				TPG_TFO(tpg)->get_fabric_name());
-
-		seq_printf(seq, "%u %s %u %s+%s\n",
-			hba->hba_index, /* scsiTransportIndex */
-			buf,  /* scsiTransportType */
-			(TPG_TFO(tpg)->tpg_get_inst_index != NULL) ?
-			TPG_TFO(tpg)->tpg_get_inst_index(tpg) :
-			0,
-			TPG_TFO(tpg)->tpg_get_wwn(tpg),
-			(strlen(wwn->unit_serial)) ?
-			/* scsiTransportDevName */
-			wwn->unit_serial : wwn->vendor);
-	}
-	spin_unlock(&dev->se_port_lock);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_transport_seq_ops = {
-	.start  = scsi_transport_seq_start,
-	.next   = scsi_transport_seq_next,
-	.stop   = scsi_transport_seq_stop,
-	.show   = scsi_transport_seq_show
-};
-
-static int scsi_transport_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_transport_seq_ops);
-}
-
-static const struct file_operations scsi_transport_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_transport_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Target Device Table
- */
-static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	return locate_hba_start(seq, pos);
-}
-
-static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v)
-{
-	locate_hba_stop(seq, v);
-}
-
-
-#define LU_COUNT	1  /* for now */
-static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba;
-	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-						g_se_dev_list);
-	struct se_device *dev = se_dev->se_dev_ptr;
-	int non_accessible_lus = 0;
-	char status[16];
-
-	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-		seq_puts(seq, "inst indx num_LUs status non_access_LUs"
-			" resets\n");
-
-	if (!(dev))
-		return 0;
-
-	hba = dev->se_hba;
-	if (!(hba)) {
-		/* Log error ? */
-		return 0;
-	}
-
-	switch (dev->dev_status) {
-	case TRANSPORT_DEVICE_ACTIVATED:
-		strcpy(status, "activated");
-		break;
-	case TRANSPORT_DEVICE_DEACTIVATED:
-		strcpy(status, "deactivated");
-		non_accessible_lus = 1;
-		break;
-	case TRANSPORT_DEVICE_SHUTDOWN:
-		strcpy(status, "shutdown");
-		non_accessible_lus = 1;
-		break;
-	case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
-	case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
-		strcpy(status, "offline");
-		non_accessible_lus = 1;
-		break;
-	default:
-		sprintf(status, "unknown(%d)", dev->dev_status);
-		non_accessible_lus = 1;
-	}
-
-	seq_printf(seq, "%u %u %u %s %u %u\n",
-		   hba->hba_index, dev->dev_index, LU_COUNT,
-		   status, non_accessible_lus, dev->num_resets);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_tgt_dev_seq_ops = {
-	.start  = scsi_tgt_dev_seq_start,
-	.next   = scsi_tgt_dev_seq_next,
-	.stop   = scsi_tgt_dev_seq_stop,
-	.show   = scsi_tgt_dev_seq_show
-};
-
-static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_tgt_dev_seq_ops);
-}
-
-static const struct file_operations scsi_tgt_dev_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_tgt_dev_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Target Port Table
- */
-static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	return locate_hba_start(seq, pos);
-}
-
-static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v)
-{
-	locate_hba_stop(seq, v);
-}
-
-static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba;
-	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-						g_se_dev_list);
-	struct se_device *dev = se_dev->se_dev_ptr;
-	struct se_port *sep, *sep_tmp;
-	struct se_portal_group *tpg;
-	u32 rx_mbytes, tx_mbytes;
-	unsigned long long num_cmds;
-	char buf[64];
-
-	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-		seq_puts(seq, "inst device indx name port_index in_cmds"
-			" write_mbytes read_mbytes hs_in_cmds\n");
-
-	if (!(dev))
-		return 0;
-
-	hba = dev->se_hba;
-	if (!(hba)) {
-		/* Log error ? */
-		return 0;
-	}
-
-	spin_lock(&dev->se_port_lock);
-	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
-		tpg = sep->sep_tpg;
-		sprintf(buf, "%sPort#",
-			TPG_TFO(tpg)->get_fabric_name());
-
-		seq_printf(seq, "%u %u %u %s%d %s%s%d ",
-		     hba->hba_index,
-		     dev->dev_index,
-		     sep->sep_index,
-		     buf, sep->sep_index,
-		     TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
-		     TPG_TFO(tpg)->tpg_get_tag(tpg));
-
-		spin_lock(&sep->sep_lun->lun_sep_lock);
-		num_cmds = sep->sep_stats.cmd_pdus;
-		rx_mbytes = (sep->sep_stats.rx_data_octets >> 20);
-		tx_mbytes = (sep->sep_stats.tx_data_octets >> 20);
-		spin_unlock(&sep->sep_lun->lun_sep_lock);
-
-		seq_printf(seq, "%llu %u %u %u\n", num_cmds,
-			rx_mbytes, tx_mbytes, 0);
-	}
-	spin_unlock(&dev->se_port_lock);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_tgt_port_seq_ops = {
-	.start  = scsi_tgt_port_seq_start,
-	.next   = scsi_tgt_port_seq_next,
-	.stop   = scsi_tgt_port_seq_stop,
-	.show   = scsi_tgt_port_seq_show
-};
-
-static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_tgt_port_seq_ops);
-}
-
-static const struct file_operations scsi_tgt_port_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_tgt_port_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Authorized Initiator Table:
- * It contains the SCSI Initiators authorized to be attached to one of the
- * local Target ports.
- * Iterates through all active TPGs and extracts the info from the ACLs
- */
-static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	spin_lock_bh(&se_global->se_tpg_lock);
-	return seq_list_start(&se_global->g_se_tpg_list, *pos);
-}
-
-static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v,
-					 loff_t *pos)
-{
-	return seq_list_next(v, &se_global->g_se_tpg_list, pos);
-}
-
-static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v)
-{
-	spin_unlock_bh(&se_global->se_tpg_lock);
-}
-
-static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
-						se_tpg_list);
-	struct se_dev_entry *deve;
-	struct se_lun *lun;
-	struct se_node_acl *se_nacl;
-	int j;
-
-	if (list_is_first(&se_tpg->se_tpg_list,
-			  &se_global->g_se_tpg_list))
-		seq_puts(seq, "inst dev port indx dev_or_port intr_name "
-			 "map_indx att_count num_cmds read_mbytes "
-			 "write_mbytes hs_num_cmds creation_time row_status\n");
-
-	if (!(se_tpg))
-		return 0;
-
-	spin_lock(&se_tpg->acl_node_lock);
-	list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) {
-
-		atomic_inc(&se_nacl->mib_ref_count);
-		smp_mb__after_atomic_inc();
-		spin_unlock(&se_tpg->acl_node_lock);
-
-		spin_lock_irq(&se_nacl->device_list_lock);
-		for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
-			deve = &se_nacl->device_list[j];
-			if (!(deve->lun_flags &
-					TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
-			    (!deve->se_lun))
-				continue;
-			lun = deve->se_lun;
-			if (!lun->lun_se_dev)
-				continue;
-
-			seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u"
-					" %u %s\n",
-				/* scsiInstIndex */
-				(TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
-				TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
-				0,
-				/* scsiDeviceIndex */
-				lun->lun_se_dev->dev_index,
-				/* scsiAuthIntrTgtPortIndex */
-				TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
-				/* scsiAuthIntrIndex */
-				se_nacl->acl_index,
-				/* scsiAuthIntrDevOrPort */
-				1,
-				/* scsiAuthIntrName */
-				se_nacl->initiatorname[0] ?
-					se_nacl->initiatorname : NONE,
-				/* FIXME: scsiAuthIntrLunMapIndex */
-				0,
-				/* scsiAuthIntrAttachedTimes */
-				deve->attach_count,
-				/* scsiAuthIntrOutCommands */
-				deve->total_cmds,
-				/* scsiAuthIntrReadMegaBytes */
-				(u32)(deve->read_bytes >> 20),
-				/* scsiAuthIntrWrittenMegaBytes */
-				(u32)(deve->write_bytes >> 20),
-				/* FIXME: scsiAuthIntrHSOutCommands */
-				0,
-				/* scsiAuthIntrLastCreation */
-				(u32)(((u32)deve->creation_time -
-					    INITIAL_JIFFIES) * 100 / HZ),
-				/* FIXME: scsiAuthIntrRowStatus */
-				"Ready");
-		}
-		spin_unlock_irq(&se_nacl->device_list_lock);
-
-		spin_lock(&se_tpg->acl_node_lock);
-		atomic_dec(&se_nacl->mib_ref_count);
-		smp_mb__after_atomic_dec();
-	}
-	spin_unlock(&se_tpg->acl_node_lock);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_auth_intr_seq_ops = {
-	.start	= scsi_auth_intr_seq_start,
-	.next	= scsi_auth_intr_seq_next,
-	.stop	= scsi_auth_intr_seq_stop,
-	.show	= scsi_auth_intr_seq_show
-};
-
-static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_auth_intr_seq_ops);
-}
-
-static const struct file_operations scsi_auth_intr_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_auth_intr_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Attached Initiator Port Table:
- * It lists the SCSI Initiators attached to one of the local Target ports.
- * Iterates through all active TPGs and use active sessions from each TPG
- * to list the info fo this table.
- */
-static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	spin_lock_bh(&se_global->se_tpg_lock);
-	return seq_list_start(&se_global->g_se_tpg_list, *pos);
-}
-
-static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v,
-					 loff_t *pos)
-{
-	return seq_list_next(v, &se_global->g_se_tpg_list, pos);
-}
-
-static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v)
-{
-	spin_unlock_bh(&se_global->se_tpg_lock);
-}
-
-static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
-						se_tpg_list);
-	struct se_dev_entry *deve;
-	struct se_lun *lun;
-	struct se_node_acl *se_nacl;
-	struct se_session *se_sess;
-	unsigned char buf[64];
-	int j;
-
-	if (list_is_first(&se_tpg->se_tpg_list,
-			  &se_global->g_se_tpg_list))
-		seq_puts(seq, "inst dev port indx port_auth_indx port_name"
-			" port_ident\n");
-
-	if (!(se_tpg))
-		return 0;
-
-	spin_lock(&se_tpg->session_lock);
-	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
-		if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) ||
-		    (!se_sess->se_node_acl) ||
-		    (!se_sess->se_node_acl->device_list))
-			continue;
-
-		atomic_inc(&se_sess->mib_ref_count);
-		smp_mb__after_atomic_inc();
-		se_nacl = se_sess->se_node_acl;
-		atomic_inc(&se_nacl->mib_ref_count);
-		smp_mb__after_atomic_inc();
-		spin_unlock(&se_tpg->session_lock);
-
-		spin_lock_irq(&se_nacl->device_list_lock);
-		for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
-			deve = &se_nacl->device_list[j];
-			if (!(deve->lun_flags &
-					TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
-			   (!deve->se_lun))
-				continue;
-
-			lun = deve->se_lun;
-			if (!lun->lun_se_dev)
-				continue;
-
-			memset(buf, 0, 64);
-			if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL)
-				TPG_TFO(se_tpg)->sess_get_initiator_sid(
-					se_sess, (unsigned char *)&buf[0], 64);
-
-			seq_printf(seq, "%u %u %u %u %u %s+i+%s\n",
-				/* scsiInstIndex */
-				(TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
-				TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
-				0,
-				/* scsiDeviceIndex */
-				lun->lun_se_dev->dev_index,
-				/* scsiPortIndex */
-				TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
-				/* scsiAttIntrPortIndex */
-				(TPG_TFO(se_tpg)->sess_get_index != NULL) ?
-				TPG_TFO(se_tpg)->sess_get_index(se_sess) :
-				0,
-				/* scsiAttIntrPortAuthIntrIdx */
-				se_nacl->acl_index,
-				/* scsiAttIntrPortName */
-				se_nacl->initiatorname[0] ?
-					se_nacl->initiatorname : NONE,
-				/* scsiAttIntrPortIdentifier */
-				buf);
-		}
-		spin_unlock_irq(&se_nacl->device_list_lock);
-
-		spin_lock(&se_tpg->session_lock);
-		atomic_dec(&se_nacl->mib_ref_count);
-		smp_mb__after_atomic_dec();
-		atomic_dec(&se_sess->mib_ref_count);
-		smp_mb__after_atomic_dec();
-	}
-	spin_unlock(&se_tpg->session_lock);
-
-	return 0;
-}
-
-static const struct seq_operations scsi_att_intr_port_seq_ops = {
-	.start	= scsi_att_intr_port_seq_start,
-	.next	= scsi_att_intr_port_seq_next,
-	.stop	= scsi_att_intr_port_seq_stop,
-	.show	= scsi_att_intr_port_seq_show
-};
-
-static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_att_intr_port_seq_ops);
-}
-
-static const struct file_operations scsi_att_intr_port_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_att_intr_port_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/*
- * SCSI Logical Unit Table
- */
-static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	return locate_hba_start(seq, pos);
-}
-
-static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	return locate_hba_next(seq, v, pos);
-}
-
-static void scsi_lu_seq_stop(struct seq_file *seq, void *v)
-{
-	locate_hba_stop(seq, v);
-}
-
-#define SCSI_LU_INDEX		1
-static int scsi_lu_seq_show(struct seq_file *seq, void *v)
-{
-	struct se_hba *hba;
-	struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
-						g_se_dev_list);
-	struct se_device *dev = se_dev->se_dev_ptr;
-	int j;
-	char str[28];
-
-	if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
-		seq_puts(seq, "inst dev indx LUN lu_name vend prod rev"
-		" dev_type status state-bit num_cmds read_mbytes"
-		" write_mbytes resets full_stat hs_num_cmds creation_time\n");
-
-	if (!(dev))
-		return 0;
-
-	hba = dev->se_hba;
-	if (!(hba)) {
-		/* Log error ? */
-		return 0;
-	}
-
-	/* Fix LU state, if we can read it from the device */
-	seq_printf(seq, "%u %u %u %llu %s", hba->hba_index,
-			dev->dev_index, SCSI_LU_INDEX,
-			(unsigned long long)0, /* FIXME: scsiLuDefaultLun */
-			(strlen(DEV_T10_WWN(dev)->unit_serial)) ?
-			/* scsiLuWwnName */
-			(char *)&DEV_T10_WWN(dev)->unit_serial[0] :
-			"None");
-
-	memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
-	/* scsiLuVendorId */
-	for (j = 0; j < 8; j++)
-		str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
-			DEV_T10_WWN(dev)->vendor[j] : 0x20;
-	str[8] = 0;
-	seq_printf(seq, " %s", str);
-
-	/* scsiLuProductId */
-	for (j = 0; j < 16; j++)
-		str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
-			DEV_T10_WWN(dev)->model[j] : 0x20;
-	str[16] = 0;
-	seq_printf(seq, " %s", str);
-
-	/* scsiLuRevisionId */
-	for (j = 0; j < 4; j++)
-		str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
-			DEV_T10_WWN(dev)->revision[j] : 0x20;
-	str[4] = 0;
-	seq_printf(seq, " %s", str);
-
-	seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n",
-		/* scsiLuPeripheralType */
-		   TRANSPORT(dev)->get_device_type(dev),
-		   (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
-		"available" : "notavailable", /* scsiLuStatus */
-		"exposed", 	/* scsiLuState */
-		(unsigned long long)dev->num_cmds,
-		/* scsiLuReadMegaBytes */
-		(u32)(dev->read_bytes >> 20),
-		/* scsiLuWrittenMegaBytes */
-		(u32)(dev->write_bytes >> 20),
-		dev->num_resets, /* scsiLuInResets */
-		0, /* scsiLuOutTaskSetFullStatus */
-		0, /* scsiLuHSInCommands */
-		(u32)(((u32)dev->creation_time - INITIAL_JIFFIES) *
-							100 / HZ));
-
-	return 0;
-}
-
-static const struct seq_operations scsi_lu_seq_ops = {
-	.start  = scsi_lu_seq_start,
-	.next   = scsi_lu_seq_next,
-	.stop   = scsi_lu_seq_stop,
-	.show   = scsi_lu_seq_show
-};
-
-static int scsi_lu_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &scsi_lu_seq_ops);
-}
-
-static const struct file_operations scsi_lu_seq_fops = {
-	.owner	 = THIS_MODULE,
-	.open	 = scsi_lu_seq_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-
-/****************************************************************************/
-
-/*
- * Remove proc fs entries
- */
-void remove_scsi_target_mib(void)
-{
-	remove_proc_entry("scsi_target/mib/scsi_inst", NULL);
-	remove_proc_entry("scsi_target/mib/scsi_dev", NULL);
-	remove_proc_entry("scsi_target/mib/scsi_port", NULL);
-	remove_proc_entry("scsi_target/mib/scsi_transport", NULL);
-	remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL);
-	remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL);
-	remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL);
-	remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL);
-	remove_proc_entry("scsi_target/mib/scsi_lu", NULL);
-	remove_proc_entry("scsi_target/mib", NULL);
-}
-
-/*
- * Create proc fs entries for the mib tables
- */
-int init_scsi_target_mib(void)
-{
-	struct proc_dir_entry *dir_entry;
-	struct proc_dir_entry *scsi_inst_entry;
-	struct proc_dir_entry *scsi_dev_entry;
-	struct proc_dir_entry *scsi_port_entry;
-	struct proc_dir_entry *scsi_transport_entry;
-	struct proc_dir_entry *scsi_tgt_dev_entry;
-	struct proc_dir_entry *scsi_tgt_port_entry;
-	struct proc_dir_entry *scsi_auth_intr_entry;
-	struct proc_dir_entry *scsi_att_intr_port_entry;
-	struct proc_dir_entry *scsi_lu_entry;
-
-	dir_entry = proc_mkdir("scsi_target/mib", NULL);
-	if (!(dir_entry)) {
-		printk(KERN_ERR "proc_mkdir() failed.\n");
-		return -1;
-	}
-
-	scsi_inst_entry =
-		create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL);
-	if (scsi_inst_entry)
-		scsi_inst_entry->proc_fops = &scsi_inst_seq_fops;
-	else
-		goto error;
-
-	scsi_dev_entry =
-		create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL);
-	if (scsi_dev_entry)
-		scsi_dev_entry->proc_fops = &scsi_dev_seq_fops;
-	else
-		goto error;
-
-	scsi_port_entry =
-		create_proc_entry("scsi_target/mib/scsi_port", 0, NULL);
-	if (scsi_port_entry)
-		scsi_port_entry->proc_fops = &scsi_port_seq_fops;
-	else
-		goto error;
-
-	scsi_transport_entry =
-		create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL);
-	if (scsi_transport_entry)
-		scsi_transport_entry->proc_fops = &scsi_transport_seq_fops;
-	else
-		goto error;
-
-	scsi_tgt_dev_entry =
-		create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL);
-	if (scsi_tgt_dev_entry)
-		scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops;
-	else
-		goto error;
-
-	scsi_tgt_port_entry =
-		create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL);
-	if (scsi_tgt_port_entry)
-		scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops;
-	else
-		goto error;
-
-	scsi_auth_intr_entry =
-		create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL);
-	if (scsi_auth_intr_entry)
-		scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops;
-	else
-		goto error;
-
-	scsi_att_intr_port_entry =
-	      create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL);
-	if (scsi_att_intr_port_entry)
-		scsi_att_intr_port_entry->proc_fops =
-				&scsi_att_intr_port_seq_fops;
-	else
-		goto error;
-
-	scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL);
-	if (scsi_lu_entry)
-		scsi_lu_entry->proc_fops = &scsi_lu_seq_fops;
-	else
-		goto error;
-
-	return 0;
-
-error:
-	printk(KERN_ERR "create_proc_entry() failed.\n");
-	remove_scsi_target_mib();
-	return -1;
-}
-
-/*
- * Initialize the index table for allocating unique row indexes to various mib
- * tables
- */
-void init_scsi_index_table(void)
-{
-	memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
-	spin_lock_init(&scsi_index_table.lock);
-}
-
-/*
- * Allocate a new row index for the entry type specified
- */
-u32 scsi_get_new_index(scsi_index_t type)
-{
-	u32 new_index;
-
-	if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
-		printk(KERN_ERR "Invalid index type %d\n", type);
-		return -1;
-	}
-
-	spin_lock(&scsi_index_table.lock);
-	new_index = ++scsi_index_table.scsi_mib_index[type];
-	if (new_index == 0)
-		new_index = ++scsi_index_table.scsi_mib_index[type];
-	spin_unlock(&scsi_index_table.lock);
-
-	return new_index;
-}
-EXPORT_SYMBOL(scsi_get_new_index);
diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h
deleted file mode 100644
index 2772046..0000000
--- a/drivers/target/target_core_mib.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef TARGET_CORE_MIB_H
-#define TARGET_CORE_MIB_H
-
-typedef enum {
-	SCSI_INST_INDEX,
-	SCSI_DEVICE_INDEX,
-	SCSI_AUTH_INTR_INDEX,
-	SCSI_INDEX_TYPE_MAX
-} scsi_index_t;
-
-struct scsi_index_table {
-	spinlock_t	lock;
-	u32 		scsi_mib_index[SCSI_INDEX_TYPE_MAX];
-} ____cacheline_aligned;
-
-/* SCSI Port stats */
-struct scsi_port_stats {
-	u64	cmd_pdus;
-	u64	tx_data_octets;
-	u64	rx_data_octets;
-} ____cacheline_aligned;
-
-extern int init_scsi_target_mib(void);
-extern void remove_scsi_target_mib(void);
-extern void init_scsi_index_table(void);
-extern u32 scsi_get_new_index(scsi_index_t);
-
-#endif   /*** TARGET_CORE_MIB_H ***/
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 742d246..f2a0847 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -462,8 +462,8 @@
 	 */
 	bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
 				FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
-	if (!(bd)) {
-		printk("pSCSI: blkdev_get_by_path() failed\n");
+	if (IS_ERR(bd)) {
+		printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
 		scsi_device_put(sd);
 		return NULL;
 	}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index abfa81a..c26f674 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -275,7 +275,6 @@
 	spin_lock_init(&acl->device_list_lock);
 	spin_lock_init(&acl->nacl_sess_lock);
 	atomic_set(&acl->acl_pr_ref_count, 0);
-	atomic_set(&acl->mib_ref_count, 0);
 	acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
 	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
 	acl->se_tpg = tpg;
@@ -318,12 +317,6 @@
 		cpu_relax();
 }
 
-void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
-{
-	while (atomic_read(&nacl->mib_ref_count) != 0)
-		cpu_relax();
-}
-
 void core_tpg_clear_object_luns(struct se_portal_group *tpg)
 {
 	int i, ret;
@@ -480,7 +473,6 @@
 	spin_unlock_bh(&tpg->session_lock);
 
 	core_tpg_wait_for_nacl_pr_ref(acl);
-	core_tpg_wait_for_mib_ref(acl);
 	core_clear_initiator_node_from_tpg(acl, tpg);
 	core_free_device_list_for_node(acl, tpg);
 
@@ -701,6 +693,8 @@
 
 int core_tpg_deregister(struct se_portal_group *se_tpg)
 {
+	struct se_node_acl *nacl, *nacl_tmp;
+
 	printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
 		" for endpoint: %s Portal Tag %u\n",
 		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
@@ -714,6 +708,25 @@
 
 	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
 		cpu_relax();
+	/*
+	 * Release any remaining demo-mode generated se_node_acl that have
+	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
+	 * in transport_deregister_session().
+	 */
+	spin_lock_bh(&se_tpg->acl_node_lock);
+	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
+			acl_list) {
+		list_del(&nacl->acl_list);
+		se_tpg->num_node_acls--;
+		spin_unlock_bh(&se_tpg->acl_node_lock);
+
+		core_tpg_wait_for_nacl_pr_ref(nacl);
+		core_free_device_list_for_node(nacl, se_tpg);
+		TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
+
+		spin_lock_bh(&se_tpg->acl_node_lock);
+	}
+	spin_unlock_bh(&se_tpg->acl_node_lock);
 
 	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
 		core_tpg_release_virtual_lun0(se_tpg);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 28b6292..236e22d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -379,6 +379,40 @@
 	se_global = NULL;
 }
 
+/* SCSI statistics table index */
+static struct scsi_index_table scsi_index_table;
+
+/*
+ * Initialize the index table for allocating unique row indexes to various mib
+ * tables.
+ */
+void init_scsi_index_table(void)
+{
+	memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
+	spin_lock_init(&scsi_index_table.lock);
+}
+
+/*
+ * Allocate a new row index for the entry type specified
+ */
+u32 scsi_get_new_index(scsi_index_t type)
+{
+	u32 new_index;
+
+	if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
+		printk(KERN_ERR "Invalid index type %d\n", type);
+		return -EINVAL;
+	}
+
+	spin_lock(&scsi_index_table.lock);
+	new_index = ++scsi_index_table.scsi_mib_index[type];
+	if (new_index == 0)
+		new_index = ++scsi_index_table.scsi_mib_index[type];
+	spin_unlock(&scsi_index_table.lock);
+
+	return new_index;
+}
+
 void transport_init_queue_obj(struct se_queue_obj *qobj)
 {
 	atomic_set(&qobj->queue_cnt, 0);
@@ -437,7 +471,6 @@
 	}
 	INIT_LIST_HEAD(&se_sess->sess_list);
 	INIT_LIST_HEAD(&se_sess->sess_acl_list);
-	atomic_set(&se_sess->mib_ref_count, 0);
 
 	return se_sess;
 }
@@ -546,12 +579,6 @@
 		transport_free_session(se_sess);
 		return;
 	}
-	/*
-	 * Wait for possible reference in drivers/target/target_core_mib.c:
-	 * scsi_att_intr_port_seq_show()
-	 */
-	while (atomic_read(&se_sess->mib_ref_count) != 0)
-		cpu_relax();
 
 	spin_lock_bh(&se_tpg->session_lock);
 	list_del(&se_sess->sess_list);
@@ -574,7 +601,6 @@
 				spin_unlock_bh(&se_tpg->acl_node_lock);
 
 				core_tpg_wait_for_nacl_pr_ref(se_nacl);
-				core_tpg_wait_for_mib_ref(se_nacl);
 				core_free_device_list_for_node(se_nacl, se_tpg);
 				TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
 						se_nacl);
@@ -4827,6 +4853,8 @@
 
 		return ret;
 	}
+
+	BUG_ON(list_empty(se_mem_list));
 	/*
 	 * This is the normal path for all normal non BIDI and BIDI-COMMAND
 	 * WRITE payloads..  If we need to do BIDI READ passthrough for
@@ -5008,7 +5036,9 @@
 		struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
 		u32 se_mem_cnt = 0, task_offset = 0;
 
-		BUG_ON(list_empty(cmd->t_task->t_mem_list));
+		if (!list_empty(T_TASK(cmd)->t_mem_list))
+			se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
+					struct se_mem, se_list);
 
 		ret = transport_do_se_mem_map(dev, task,
 				cmd->t_task->t_mem_list, NULL, se_mem,
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index c43ef48..3962772 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -9,3 +9,5 @@
 obj-$(CONFIG_R3964)		+= n_r3964.o
 
 obj-y				+= vt/
+obj-$(CONFIG_HVC_DRIVER)	+= hvc/
+obj-y				+= serial/
diff --git a/drivers/tty/hvc/Makefile b/drivers/tty/hvc/Makefile
new file mode 100644
index 0000000..d79e7e9
--- /dev/null
+++ b/drivers/tty/hvc/Makefile
@@ -0,0 +1,12 @@
+obj-$(CONFIG_HVC_CONSOLE)	+= hvc_vio.o hvsi.o
+obj-$(CONFIG_HVC_ISERIES)	+= hvc_iseries.o
+obj-$(CONFIG_HVC_RTAS)		+= hvc_rtas.o
+obj-$(CONFIG_HVC_TILE)		+= hvc_tile.o
+obj-$(CONFIG_HVC_DCC)		+= hvc_dcc.o
+obj-$(CONFIG_HVC_BEAT)		+= hvc_beat.o
+obj-$(CONFIG_HVC_DRIVER)	+= hvc_console.o
+obj-$(CONFIG_HVC_IRQ)		+= hvc_irq.o
+obj-$(CONFIG_HVC_XEN)		+= hvc_xen.o
+obj-$(CONFIG_HVC_IUCV)		+= hvc_iucv.o
+obj-$(CONFIG_HVC_UDBG)		+= hvc_udbg.o
+obj-$(CONFIG_HVCS)		+= hvcs.o
diff --git a/drivers/char/hvc_beat.c b/drivers/tty/hvc/hvc_beat.c
similarity index 100%
rename from drivers/char/hvc_beat.c
rename to drivers/tty/hvc/hvc_beat.c
diff --git a/drivers/char/hvc_console.c b/drivers/tty/hvc/hvc_console.c
similarity index 100%
rename from drivers/char/hvc_console.c
rename to drivers/tty/hvc/hvc_console.c
diff --git a/drivers/char/hvc_console.h b/drivers/tty/hvc/hvc_console.h
similarity index 100%
rename from drivers/char/hvc_console.h
rename to drivers/tty/hvc/hvc_console.h
diff --git a/drivers/char/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
similarity index 100%
rename from drivers/char/hvc_dcc.c
rename to drivers/tty/hvc/hvc_dcc.c
diff --git a/drivers/char/hvc_irq.c b/drivers/tty/hvc/hvc_irq.c
similarity index 100%
rename from drivers/char/hvc_irq.c
rename to drivers/tty/hvc/hvc_irq.c
diff --git a/drivers/char/hvc_iseries.c b/drivers/tty/hvc/hvc_iseries.c
similarity index 100%
rename from drivers/char/hvc_iseries.c
rename to drivers/tty/hvc/hvc_iseries.c
diff --git a/drivers/char/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
similarity index 100%
rename from drivers/char/hvc_iucv.c
rename to drivers/tty/hvc/hvc_iucv.c
diff --git a/drivers/char/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
similarity index 100%
rename from drivers/char/hvc_rtas.c
rename to drivers/tty/hvc/hvc_rtas.c
diff --git a/drivers/char/hvc_tile.c b/drivers/tty/hvc/hvc_tile.c
similarity index 100%
rename from drivers/char/hvc_tile.c
rename to drivers/tty/hvc/hvc_tile.c
diff --git a/drivers/char/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
similarity index 100%
rename from drivers/char/hvc_udbg.c
rename to drivers/tty/hvc/hvc_udbg.c
diff --git a/drivers/char/hvc_vio.c b/drivers/tty/hvc/hvc_vio.c
similarity index 100%
rename from drivers/char/hvc_vio.c
rename to drivers/tty/hvc/hvc_vio.c
diff --git a/drivers/char/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
similarity index 100%
rename from drivers/char/hvc_xen.c
rename to drivers/tty/hvc/hvc_xen.c
diff --git a/drivers/char/hvcs.c b/drivers/tty/hvc/hvcs.c
similarity index 100%
rename from drivers/char/hvcs.c
rename to drivers/tty/hvc/hvcs.c
diff --git a/drivers/char/hvsi.c b/drivers/tty/hvc/hvsi.c
similarity index 100%
rename from drivers/char/hvsi.c
rename to drivers/tty/hvc/hvsi.c
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 44b8412..aa2e5d3 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2414,6 +2414,7 @@
 
 	gsm->initiator = c->initiator;
 	gsm->mru = c->mru;
+	gsm->mtu = c->mtu;
 	gsm->encoding = c->encapsulation;
 	gsm->adaption = c->adaption;
 	gsm->n2 = c->n2;
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 47d3228..52fc0c9 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -581,8 +581,9 @@
 			   __u8 __user *buf, size_t nr)
 {
 	struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
-	int ret;
+	int ret = 0;
 	struct n_hdlc_buf *rbuf;
+	DECLARE_WAITQUEUE(wait, current);
 
 	if (debuglevel >= DEBUG_LEVEL_INFO)	
 		printk("%s(%d)n_hdlc_tty_read() called\n",__FILE__,__LINE__);
@@ -598,57 +599,55 @@
 		return -EFAULT;
 	}
 
-	tty_lock();
+	add_wait_queue(&tty->read_wait, &wait);
 
 	for (;;) {
 		if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
-			tty_unlock();
-			return -EIO;
+			ret = -EIO;
+			break;
 		}
+		if (tty_hung_up_p(file))
+			break;
 
-		n_hdlc = tty2n_hdlc (tty);
-		if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC ||
-			 tty != n_hdlc->tty) {
-			tty_unlock();
-			return 0;
-		}
+		set_current_state(TASK_INTERRUPTIBLE);
 
 		rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
-		if (rbuf)
+		if (rbuf) {
+			if (rbuf->count > nr) {
+				/* too large for caller's buffer */
+				ret = -EOVERFLOW;
+			} else {
+				if (copy_to_user(buf, rbuf->buf, rbuf->count))
+					ret = -EFAULT;
+				else
+					ret = rbuf->count;
+			}
+
+			if (n_hdlc->rx_free_buf_list.count >
+			    DEFAULT_RX_BUF_COUNT)
+				kfree(rbuf);
+			else
+				n_hdlc_buf_put(&n_hdlc->rx_free_buf_list, rbuf);
 			break;
+		}
 			
 		/* no data */
 		if (file->f_flags & O_NONBLOCK) {
-			tty_unlock();
-			return -EAGAIN;
+			ret = -EAGAIN;
+			break;
 		}
-			
-		interruptible_sleep_on (&tty->read_wait);
+
+		schedule();
+
 		if (signal_pending(current)) {
-			tty_unlock();
-			return -EINTR;
+			ret = -EINTR;
+			break;
 		}
 	}
-		
-	if (rbuf->count > nr)
-		/* frame too large for caller's buffer (discard frame) */
-		ret = -EOVERFLOW;
-	else {
-		/* Copy the data to the caller's buffer */
-		if (copy_to_user(buf, rbuf->buf, rbuf->count))
-			ret = -EFAULT;
-		else
-			ret = rbuf->count;
-	}
-	
-	/* return HDLC buffer to free list unless the free list */
-	/* count has exceeded the default value, in which case the */
-	/* buffer is freed back to the OS to conserve memory */
-	if (n_hdlc->rx_free_buf_list.count > DEFAULT_RX_BUF_COUNT)
-		kfree(rbuf);
-	else	
-		n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,rbuf);
-	tty_unlock();
+
+	remove_wait_queue(&tty->read_wait, &wait);
+	__set_current_state(TASK_RUNNING);
+
 	return ret;
 	
 }	/* end of n_hdlc_tty_read() */
@@ -691,14 +690,15 @@
 		count = maxframe;
 	}
 	
-	tty_lock();
-
 	add_wait_queue(&tty->write_wait, &wait);
-	set_current_state(TASK_INTERRUPTIBLE);
+
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
 	
-	/* Allocate transmit buffer */
-	/* sleep until transmit buffer available */		
-	while (!(tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list))) {
+		tbuf = n_hdlc_buf_get(&n_hdlc->tx_free_buf_list);
+		if (tbuf)
+			break;
+
 		if (file->f_flags & O_NONBLOCK) {
 			error = -EAGAIN;
 			break;
@@ -719,7 +719,7 @@
 		}
 	}
 
-	set_current_state(TASK_RUNNING);
+	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&tty->write_wait, &wait);
 
 	if (!error) {		
@@ -731,7 +731,7 @@
 		n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf);
 		n_hdlc_send_frames(n_hdlc,tty);
 	}
-	tty_unlock();
+
 	return error;
 	
 }	/* end of n_hdlc_tty_write() */
diff --git a/drivers/serial/21285.c b/drivers/tty/serial/21285.c
similarity index 100%
rename from drivers/serial/21285.c
rename to drivers/tty/serial/21285.c
diff --git a/drivers/serial/68328serial.c b/drivers/tty/serial/68328serial.c
similarity index 98%
rename from drivers/serial/68328serial.c
rename to drivers/tty/serial/68328serial.c
index be0ebce..de0160e 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/tty/serial/68328serial.c
@@ -262,7 +262,7 @@
 
 static void receive_chars(struct m68k_serial *info, unsigned short rx)
 {
-	struct tty_struct *tty = info->port.tty;
+	struct tty_struct *tty = info->tty;
 	m68328_uart *uart = &uart_addr[info->line];
 	unsigned char ch, flag;
 
@@ -329,7 +329,7 @@
 		goto clear_and_return;
 	}
 
-	if((info->xmit_cnt <= 0) || info->port.tty->stopped) {
+	if((info->xmit_cnt <= 0) || info->tty->stopped) {
 		/* That's peculiar... TX ints off */
 		uart->ustcnt &= ~USTCNT_TX_INTR_MASK;
 		goto clear_and_return;
@@ -383,7 +383,7 @@
 	struct m68k_serial	*info = container_of(work, struct m68k_serial, tqueue);
 	struct tty_struct	*tty;
 	
-	tty = info->port.tty;
+	tty = info->tty;
 	if (!tty)
 		return;
 #if 0
@@ -407,7 +407,7 @@
 	struct m68k_serial	*info = container_of(work, struct m68k_serial, tqueue_hangup);
 	struct tty_struct	*tty;
 	
-	tty = info->port.tty;
+	tty = info->tty;
 	if (!tty)
 		return;
 
@@ -451,8 +451,8 @@
 	uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_RX_INTR_MASK;
 #endif
 
-	if (info->port.tty)
-		clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
+	if (info->tty)
+		clear_bit(TTY_IO_ERROR, &info->tty->flags);
 	info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
 
 	/*
@@ -486,8 +486,8 @@
 		info->xmit_buf = 0;
 	}
 
-	if (info->port.tty)
-		set_bit(TTY_IO_ERROR, &info->port.tty->flags);
+	if (info->tty)
+		set_bit(TTY_IO_ERROR, &info->tty->flags);
 	
 	info->flags &= ~S_INITIALIZED;
 	local_irq_restore(flags);
@@ -553,9 +553,9 @@
 	unsigned cflag;
 	int	i;
 
-	if (!info->port.tty || !info->port.tty->termios)
+	if (!info->tty || !info->tty->termios)
 		return;
-	cflag = info->port.tty->termios->c_cflag;
+	cflag = info->tty->termios->c_cflag;
 	if (!(port = info->port))
 		return;
 
@@ -970,7 +970,6 @@
 static int rs_ioctl(struct tty_struct *tty, struct file * file,
 		    unsigned int cmd, unsigned long arg)
 {
-	int error;
 	struct m68k_serial * info = (struct m68k_serial *)tty->driver_data;
 	int retval;
 
@@ -1104,7 +1103,7 @@
 	tty_ldisc_flush(tty);
 	tty->closing = 0;
 	info->event = 0;
-	info->port.tty = NULL;
+	info->tty = NULL;
 #warning "This is not and has never been valid so fix it"	
 #if 0
 	if (tty->ldisc.num != ldiscs[N_TTY].num) {
@@ -1142,7 +1141,7 @@
 	info->event = 0;
 	info->count = 0;
 	info->flags &= ~S_NORMAL_ACTIVE;
-	info->port.tty = NULL;
+	info->tty = NULL;
 	wake_up_interruptible(&info->open_wait);
 }
 
@@ -1261,7 +1260,7 @@
 
 	info->count++;
 	tty->driver_data = info;
-	info->port.tty = tty;
+	info->tty = tty;
 
 	/*
 	 * Start up serial port
@@ -1338,7 +1337,7 @@
 	    info = &m68k_soft[i];
 	    info->magic = SERIAL_MAGIC;
 	    info->port = (int) &uart_addr[i];
-	    info->port.tty = NULL;
+	    info->tty = NULL;
 	    info->irq = uart_irqs[i];
 	    info->custom_divisor = 16;
 	    info->close_delay = 50;
diff --git a/drivers/serial/68328serial.h b/drivers/tty/serial/68328serial.h
similarity index 100%
rename from drivers/serial/68328serial.h
rename to drivers/tty/serial/68328serial.h
diff --git a/drivers/serial/68360serial.c b/drivers/tty/serial/68360serial.c
similarity index 99%
rename from drivers/serial/68360serial.c
rename to drivers/tty/serial/68360serial.c
index 88b1335..bc21eea 100644
--- a/drivers/serial/68360serial.c
+++ b/drivers/tty/serial/68360serial.c
@@ -2428,6 +2428,7 @@
 	/* .read_proc = rs_360_read_proc, */
 	.tiocmget = rs_360_tiocmget,
 	.tiocmset = rs_360_tiocmset,
+	.get_icount = rs_360_get_icount,
 };
 
 static int __init rs_360_init(void)
diff --git a/drivers/serial/8250.c b/drivers/tty/serial/8250.c
similarity index 99%
rename from drivers/serial/8250.c
rename to drivers/tty/serial/8250.c
index b25e6e4..3975df6 100644
--- a/drivers/serial/8250.c
+++ b/drivers/tty/serial/8250.c
@@ -236,7 +236,8 @@
 		.fifo_size	= 128,
 		.tx_loadsz	= 128,
 		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
-		.flags		= UART_CAP_FIFO | UART_CAP_EFR | UART_CAP_SLEEP,
+		/* UART_CAP_EFR breaks billionon CF bluetooth card. */
+		.flags		= UART_CAP_FIFO | UART_CAP_SLEEP,
 	},
 	[PORT_16654] = {
 		.name		= "ST16654",
diff --git a/drivers/serial/8250.h b/drivers/tty/serial/8250.h
similarity index 100%
rename from drivers/serial/8250.h
rename to drivers/tty/serial/8250.h
diff --git a/drivers/serial/8250_accent.c b/drivers/tty/serial/8250_accent.c
similarity index 100%
rename from drivers/serial/8250_accent.c
rename to drivers/tty/serial/8250_accent.c
diff --git a/drivers/serial/8250_acorn.c b/drivers/tty/serial/8250_acorn.c
similarity index 100%
rename from drivers/serial/8250_acorn.c
rename to drivers/tty/serial/8250_acorn.c
diff --git a/drivers/serial/8250_boca.c b/drivers/tty/serial/8250_boca.c
similarity index 100%
rename from drivers/serial/8250_boca.c
rename to drivers/tty/serial/8250_boca.c
diff --git a/drivers/serial/8250_early.c b/drivers/tty/serial/8250_early.c
similarity index 100%
rename from drivers/serial/8250_early.c
rename to drivers/tty/serial/8250_early.c
diff --git a/drivers/serial/8250_exar_st16c554.c b/drivers/tty/serial/8250_exar_st16c554.c
similarity index 100%
rename from drivers/serial/8250_exar_st16c554.c
rename to drivers/tty/serial/8250_exar_st16c554.c
diff --git a/drivers/serial/8250_fourport.c b/drivers/tty/serial/8250_fourport.c
similarity index 100%
rename from drivers/serial/8250_fourport.c
rename to drivers/tty/serial/8250_fourport.c
diff --git a/drivers/serial/8250_gsc.c b/drivers/tty/serial/8250_gsc.c
similarity index 100%
rename from drivers/serial/8250_gsc.c
rename to drivers/tty/serial/8250_gsc.c
diff --git a/drivers/serial/8250_hp300.c b/drivers/tty/serial/8250_hp300.c
similarity index 100%
rename from drivers/serial/8250_hp300.c
rename to drivers/tty/serial/8250_hp300.c
diff --git a/drivers/serial/8250_hub6.c b/drivers/tty/serial/8250_hub6.c
similarity index 100%
rename from drivers/serial/8250_hub6.c
rename to drivers/tty/serial/8250_hub6.c
diff --git a/drivers/serial/8250_mca.c b/drivers/tty/serial/8250_mca.c
similarity index 100%
rename from drivers/serial/8250_mca.c
rename to drivers/tty/serial/8250_mca.c
diff --git a/drivers/serial/8250_pci.c b/drivers/tty/serial/8250_pci.c
similarity index 100%
rename from drivers/serial/8250_pci.c
rename to drivers/tty/serial/8250_pci.c
diff --git a/drivers/serial/8250_pnp.c b/drivers/tty/serial/8250_pnp.c
similarity index 100%
rename from drivers/serial/8250_pnp.c
rename to drivers/tty/serial/8250_pnp.c
diff --git a/drivers/serial/Kconfig b/drivers/tty/serial/Kconfig
similarity index 99%
rename from drivers/serial/Kconfig
rename to drivers/tty/serial/Kconfig
index c1df767..2b83346 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -81,7 +81,7 @@
 	default SERIAL_8250
 
 config SERIAL_8250_PCI
-	tristate "8250/16550 PCI device support" if EMBEDDED
+	tristate "8250/16550 PCI device support" if EXPERT
 	depends on SERIAL_8250 && PCI
 	default SERIAL_8250
 	help
@@ -90,7 +90,7 @@
 	  Saves about 9K.
 
 config SERIAL_8250_PNP
-	tristate "8250/16550 PNP device support" if EMBEDDED
+	tristate "8250/16550 PNP device support" if EXPERT
 	depends on SERIAL_8250 && PNP
 	default SERIAL_8250
 	help
@@ -1518,6 +1518,7 @@
 config SERIAL_GRLIB_GAISLER_APBUART
 	tristate "GRLIB APBUART serial support"
 	depends on OF
+	select SERIAL_CORE
 	---help---
 	Add support for the GRLIB APBUART serial port.
 
diff --git a/drivers/serial/Makefile b/drivers/tty/serial/Makefile
similarity index 100%
rename from drivers/serial/Makefile
rename to drivers/tty/serial/Makefile
diff --git a/drivers/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
similarity index 100%
rename from drivers/serial/altera_jtaguart.c
rename to drivers/tty/serial/altera_jtaguart.c
diff --git a/drivers/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
similarity index 100%
rename from drivers/serial/altera_uart.c
rename to drivers/tty/serial/altera_uart.c
diff --git a/drivers/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
similarity index 100%
rename from drivers/serial/amba-pl010.c
rename to drivers/tty/serial/amba-pl010.c
diff --git a/drivers/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
similarity index 100%
rename from drivers/serial/amba-pl011.c
rename to drivers/tty/serial/amba-pl011.c
diff --git a/drivers/serial/apbuart.c b/drivers/tty/serial/apbuart.c
similarity index 100%
rename from drivers/serial/apbuart.c
rename to drivers/tty/serial/apbuart.c
diff --git a/drivers/serial/apbuart.h b/drivers/tty/serial/apbuart.h
similarity index 100%
rename from drivers/serial/apbuart.h
rename to drivers/tty/serial/apbuart.h
diff --git a/drivers/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
similarity index 100%
rename from drivers/serial/atmel_serial.c
rename to drivers/tty/serial/atmel_serial.c
diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
similarity index 100%
rename from drivers/serial/bcm63xx_uart.c
rename to drivers/tty/serial/bcm63xx_uart.c
diff --git a/drivers/serial/bfin_5xx.c b/drivers/tty/serial/bfin_5xx.c
similarity index 98%
rename from drivers/serial/bfin_5xx.c
rename to drivers/tty/serial/bfin_5xx.c
index e381b89..9b1ff2b 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/tty/serial/bfin_5xx.c
@@ -370,10 +370,8 @@
 {
 	struct bfin_serial_port *uart = dev_id;
 
-	spin_lock(&uart->port.lock);
 	while (UART_GET_LSR(uart) & DR)
 		bfin_serial_rx_chars(uart);
-	spin_unlock(&uart->port.lock);
 
 	return IRQ_HANDLED;
 }
@@ -490,9 +488,8 @@
 {
 	int x_pos, pos;
 
-	dma_disable_irq(uart->tx_dma_channel);
-	dma_disable_irq(uart->rx_dma_channel);
-	spin_lock_bh(&uart->port.lock);
+	dma_disable_irq_nosync(uart->rx_dma_channel);
+	spin_lock_bh(&uart->rx_lock);
 
 	/* 2D DMA RX buffer ring is used. Because curr_y_count and
 	 * curr_x_count can't be read as an atomic operation,
@@ -523,8 +520,7 @@
 		uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
 	}
 
-	spin_unlock_bh(&uart->port.lock);
-	dma_enable_irq(uart->tx_dma_channel);
+	spin_unlock_bh(&uart->rx_lock);
 	dma_enable_irq(uart->rx_dma_channel);
 
 	mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
@@ -571,7 +567,7 @@
 	unsigned short irqstat;
 	int x_pos, pos;
 
-	spin_lock(&uart->port.lock);
+	spin_lock(&uart->rx_lock);
 	irqstat = get_dma_curr_irqstat(uart->rx_dma_channel);
 	clear_dma_irqstat(uart->rx_dma_channel);
 
@@ -589,7 +585,7 @@
 		uart->rx_dma_buf.tail = uart->rx_dma_buf.head;
 	}
 
-	spin_unlock(&uart->port.lock);
+	spin_unlock(&uart->rx_lock);
 
 	return IRQ_HANDLED;
 }
@@ -1332,6 +1328,7 @@
 		}
 
 #ifdef CONFIG_SERIAL_BFIN_DMA
+		spin_lock_init(&uart->rx_lock);
 		uart->tx_done	    = 1;
 		uart->tx_count	    = 0;
 
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
similarity index 100%
rename from drivers/serial/bfin_sport_uart.c
rename to drivers/tty/serial/bfin_sport_uart.c
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/tty/serial/bfin_sport_uart.h
similarity index 100%
rename from drivers/serial/bfin_sport_uart.h
rename to drivers/tty/serial/bfin_sport_uart.h
diff --git a/drivers/serial/clps711x.c b/drivers/tty/serial/clps711x.c
similarity index 100%
rename from drivers/serial/clps711x.c
rename to drivers/tty/serial/clps711x.c
diff --git a/drivers/serial/cpm_uart/Makefile b/drivers/tty/serial/cpm_uart/Makefile
similarity index 100%
rename from drivers/serial/cpm_uart/Makefile
rename to drivers/tty/serial/cpm_uart/Makefile
diff --git a/drivers/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart/cpm_uart.h
similarity index 100%
rename from drivers/serial/cpm_uart/cpm_uart.h
rename to drivers/tty/serial/cpm_uart/cpm_uart.h
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
similarity index 100%
rename from drivers/serial/cpm_uart/cpm_uart_core.c
rename to drivers/tty/serial/cpm_uart/cpm_uart_core.c
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
similarity index 100%
rename from drivers/serial/cpm_uart/cpm_uart_cpm1.c
rename to drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
similarity index 100%
rename from drivers/serial/cpm_uart/cpm_uart_cpm1.h
rename to drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
similarity index 100%
rename from drivers/serial/cpm_uart/cpm_uart_cpm2.c
rename to drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
similarity index 100%
rename from drivers/serial/cpm_uart/cpm_uart_cpm2.h
rename to drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
diff --git a/drivers/serial/crisv10.c b/drivers/tty/serial/crisv10.c
similarity index 100%
rename from drivers/serial/crisv10.c
rename to drivers/tty/serial/crisv10.c
diff --git a/drivers/serial/crisv10.h b/drivers/tty/serial/crisv10.h
similarity index 100%
rename from drivers/serial/crisv10.h
rename to drivers/tty/serial/crisv10.h
diff --git a/drivers/serial/dz.c b/drivers/tty/serial/dz.c
similarity index 100%
rename from drivers/serial/dz.c
rename to drivers/tty/serial/dz.c
diff --git a/drivers/serial/dz.h b/drivers/tty/serial/dz.h
similarity index 100%
rename from drivers/serial/dz.h
rename to drivers/tty/serial/dz.h
diff --git a/drivers/serial/icom.c b/drivers/tty/serial/icom.c
similarity index 100%
rename from drivers/serial/icom.c
rename to drivers/tty/serial/icom.c
diff --git a/drivers/serial/icom.h b/drivers/tty/serial/icom.h
similarity index 100%
rename from drivers/serial/icom.h
rename to drivers/tty/serial/icom.h
diff --git a/drivers/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
similarity index 100%
rename from drivers/serial/ifx6x60.c
rename to drivers/tty/serial/ifx6x60.c
diff --git a/drivers/serial/ifx6x60.h b/drivers/tty/serial/ifx6x60.h
similarity index 100%
rename from drivers/serial/ifx6x60.h
rename to drivers/tty/serial/ifx6x60.h
diff --git a/drivers/serial/imx.c b/drivers/tty/serial/imx.c
similarity index 100%
rename from drivers/serial/imx.c
rename to drivers/tty/serial/imx.c
diff --git a/drivers/serial/ioc3_serial.c b/drivers/tty/serial/ioc3_serial.c
similarity index 100%
rename from drivers/serial/ioc3_serial.c
rename to drivers/tty/serial/ioc3_serial.c
diff --git a/drivers/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
similarity index 100%
rename from drivers/serial/ioc4_serial.c
rename to drivers/tty/serial/ioc4_serial.c
diff --git a/drivers/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
similarity index 100%
rename from drivers/serial/ip22zilog.c
rename to drivers/tty/serial/ip22zilog.c
diff --git a/drivers/serial/ip22zilog.h b/drivers/tty/serial/ip22zilog.h
similarity index 100%
rename from drivers/serial/ip22zilog.h
rename to drivers/tty/serial/ip22zilog.h
diff --git a/drivers/serial/jsm/Makefile b/drivers/tty/serial/jsm/Makefile
similarity index 100%
rename from drivers/serial/jsm/Makefile
rename to drivers/tty/serial/jsm/Makefile
diff --git a/drivers/serial/jsm/jsm.h b/drivers/tty/serial/jsm/jsm.h
similarity index 100%
rename from drivers/serial/jsm/jsm.h
rename to drivers/tty/serial/jsm/jsm.h
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
similarity index 100%
rename from drivers/serial/jsm/jsm_driver.c
rename to drivers/tty/serial/jsm/jsm_driver.c
diff --git a/drivers/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c
similarity index 100%
rename from drivers/serial/jsm/jsm_neo.c
rename to drivers/tty/serial/jsm/jsm_neo.c
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/tty/serial/jsm/jsm_tty.c
similarity index 100%
rename from drivers/serial/jsm/jsm_tty.c
rename to drivers/tty/serial/jsm/jsm_tty.c
diff --git a/drivers/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
similarity index 100%
rename from drivers/serial/kgdboc.c
rename to drivers/tty/serial/kgdboc.c
diff --git a/drivers/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
similarity index 100%
rename from drivers/serial/m32r_sio.c
rename to drivers/tty/serial/m32r_sio.c
diff --git a/drivers/serial/m32r_sio.h b/drivers/tty/serial/m32r_sio.h
similarity index 100%
rename from drivers/serial/m32r_sio.h
rename to drivers/tty/serial/m32r_sio.h
diff --git a/drivers/serial/m32r_sio_reg.h b/drivers/tty/serial/m32r_sio_reg.h
similarity index 100%
rename from drivers/serial/m32r_sio_reg.h
rename to drivers/tty/serial/m32r_sio_reg.h
diff --git a/drivers/serial/max3100.c b/drivers/tty/serial/max3100.c
similarity index 99%
rename from drivers/serial/max3100.c
rename to drivers/tty/serial/max3100.c
index beb1afa..7b951ad 100644
--- a/drivers/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -601,7 +601,7 @@
 	s->rts = 0;
 
 	sprintf(b, "max3100-%d", s->minor);
-	s->workqueue = create_freezeable_workqueue(b);
+	s->workqueue = create_freezable_workqueue(b);
 	if (!s->workqueue) {
 		dev_warn(&s->spi->dev, "cannot create workqueue\n");
 		return -EBUSY;
diff --git a/drivers/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
similarity index 100%
rename from drivers/serial/max3107-aava.c
rename to drivers/tty/serial/max3107-aava.c
diff --git a/drivers/serial/max3107.c b/drivers/tty/serial/max3107.c
similarity index 99%
rename from drivers/serial/max3107.c
rename to drivers/tty/serial/max3107.c
index 910870e..750b4f6 100644
--- a/drivers/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -833,7 +833,7 @@
 	struct max3107_port *s = container_of(port, struct max3107_port, port);
 
 	/* Initialize work queue */
-	s->workqueue = create_freezeable_workqueue("max3107");
+	s->workqueue = create_freezable_workqueue("max3107");
 	if (!s->workqueue) {
 		dev_err(&s->spi->dev, "Workqueue creation failed\n");
 		return -EBUSY;
diff --git a/drivers/serial/max3107.h b/drivers/tty/serial/max3107.h
similarity index 100%
rename from drivers/serial/max3107.h
rename to drivers/tty/serial/max3107.h
diff --git a/drivers/serial/mcf.c b/drivers/tty/serial/mcf.c
similarity index 100%
rename from drivers/serial/mcf.c
rename to drivers/tty/serial/mcf.c
diff --git a/drivers/serial/mfd.c b/drivers/tty/serial/mfd.c
similarity index 100%
rename from drivers/serial/mfd.c
rename to drivers/tty/serial/mfd.c
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
similarity index 100%
rename from drivers/serial/mpc52xx_uart.c
rename to drivers/tty/serial/mpc52xx_uart.c
diff --git a/drivers/serial/mpsc.c b/drivers/tty/serial/mpsc.c
similarity index 100%
rename from drivers/serial/mpsc.c
rename to drivers/tty/serial/mpsc.c
diff --git a/drivers/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
similarity index 100%
rename from drivers/serial/mrst_max3110.c
rename to drivers/tty/serial/mrst_max3110.c
diff --git a/drivers/serial/mrst_max3110.h b/drivers/tty/serial/mrst_max3110.h
similarity index 100%
rename from drivers/serial/mrst_max3110.h
rename to drivers/tty/serial/mrst_max3110.h
diff --git a/drivers/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
similarity index 100%
rename from drivers/serial/msm_serial.c
rename to drivers/tty/serial/msm_serial.c
diff --git a/drivers/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
similarity index 100%
rename from drivers/serial/msm_serial.h
rename to drivers/tty/serial/msm_serial.h
diff --git a/drivers/serial/mux.c b/drivers/tty/serial/mux.c
similarity index 100%
rename from drivers/serial/mux.c
rename to drivers/tty/serial/mux.c
diff --git a/drivers/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
similarity index 100%
rename from drivers/serial/netx-serial.c
rename to drivers/tty/serial/netx-serial.c
diff --git a/drivers/serial/nwpserial.c b/drivers/tty/serial/nwpserial.c
similarity index 100%
rename from drivers/serial/nwpserial.c
rename to drivers/tty/serial/nwpserial.c
diff --git a/drivers/serial/of_serial.c b/drivers/tty/serial/of_serial.c
similarity index 100%
rename from drivers/serial/of_serial.c
rename to drivers/tty/serial/of_serial.c
diff --git a/drivers/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
similarity index 100%
rename from drivers/serial/omap-serial.c
rename to drivers/tty/serial/omap-serial.c
diff --git a/drivers/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
similarity index 100%
rename from drivers/serial/pch_uart.c
rename to drivers/tty/serial/pch_uart.c
diff --git a/drivers/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
similarity index 100%
rename from drivers/serial/pmac_zilog.c
rename to drivers/tty/serial/pmac_zilog.c
diff --git a/drivers/serial/pmac_zilog.h b/drivers/tty/serial/pmac_zilog.h
similarity index 100%
rename from drivers/serial/pmac_zilog.h
rename to drivers/tty/serial/pmac_zilog.h
diff --git a/drivers/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
similarity index 100%
rename from drivers/serial/pnx8xxx_uart.c
rename to drivers/tty/serial/pnx8xxx_uart.c
diff --git a/drivers/serial/pxa.c b/drivers/tty/serial/pxa.c
similarity index 100%
rename from drivers/serial/pxa.c
rename to drivers/tty/serial/pxa.c
diff --git a/drivers/serial/s3c2400.c b/drivers/tty/serial/s3c2400.c
similarity index 100%
rename from drivers/serial/s3c2400.c
rename to drivers/tty/serial/s3c2400.c
diff --git a/drivers/serial/s3c2410.c b/drivers/tty/serial/s3c2410.c
similarity index 100%
rename from drivers/serial/s3c2410.c
rename to drivers/tty/serial/s3c2410.c
diff --git a/drivers/serial/s3c2412.c b/drivers/tty/serial/s3c2412.c
similarity index 100%
rename from drivers/serial/s3c2412.c
rename to drivers/tty/serial/s3c2412.c
diff --git a/drivers/serial/s3c2440.c b/drivers/tty/serial/s3c2440.c
similarity index 100%
rename from drivers/serial/s3c2440.c
rename to drivers/tty/serial/s3c2440.c
diff --git a/drivers/serial/s3c24a0.c b/drivers/tty/serial/s3c24a0.c
similarity index 100%
rename from drivers/serial/s3c24a0.c
rename to drivers/tty/serial/s3c24a0.c
diff --git a/drivers/serial/s3c6400.c b/drivers/tty/serial/s3c6400.c
similarity index 100%
rename from drivers/serial/s3c6400.c
rename to drivers/tty/serial/s3c6400.c
diff --git a/drivers/serial/s5pv210.c b/drivers/tty/serial/s5pv210.c
similarity index 100%
rename from drivers/serial/s5pv210.c
rename to drivers/tty/serial/s5pv210.c
diff --git a/drivers/serial/sa1100.c b/drivers/tty/serial/sa1100.c
similarity index 100%
rename from drivers/serial/sa1100.c
rename to drivers/tty/serial/sa1100.c
diff --git a/drivers/serial/samsung.c b/drivers/tty/serial/samsung.c
similarity index 100%
rename from drivers/serial/samsung.c
rename to drivers/tty/serial/samsung.c
diff --git a/drivers/serial/samsung.h b/drivers/tty/serial/samsung.h
similarity index 100%
rename from drivers/serial/samsung.h
rename to drivers/tty/serial/samsung.h
diff --git a/drivers/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
similarity index 99%
rename from drivers/serial/sb1250-duart.c
rename to drivers/tty/serial/sb1250-duart.c
index a2f2b32..602d984 100644
--- a/drivers/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -829,7 +829,7 @@
 #ifdef CONFIG_SERIAL_SB1250_DUART_CONSOLE
 /*
  * Serial console stuff.  Very basic, polling driver for doing serial
- * console output.  The console_sem is held by the caller, so we
+ * console output.  The console_lock is held by the caller, so we
  * shouldn't be interrupted for more console activity.
  */
 static void sbd_console_putchar(struct uart_port *uport, int ch)
diff --git a/drivers/serial/sc26xx.c b/drivers/tty/serial/sc26xx.c
similarity index 100%
rename from drivers/serial/sc26xx.c
rename to drivers/tty/serial/sc26xx.c
diff --git a/drivers/serial/serial_core.c b/drivers/tty/serial/serial_core.c
similarity index 100%
rename from drivers/serial/serial_core.c
rename to drivers/tty/serial/serial_core.c
diff --git a/drivers/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c
similarity index 100%
rename from drivers/serial/serial_cs.c
rename to drivers/tty/serial/serial_cs.c
diff --git a/drivers/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c
similarity index 100%
rename from drivers/serial/serial_ks8695.c
rename to drivers/tty/serial/serial_ks8695.c
diff --git a/drivers/serial/serial_lh7a40x.c b/drivers/tty/serial/serial_lh7a40x.c
similarity index 100%
rename from drivers/serial/serial_lh7a40x.c
rename to drivers/tty/serial/serial_lh7a40x.c
diff --git a/drivers/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
similarity index 100%
rename from drivers/serial/serial_txx9.c
rename to drivers/tty/serial/serial_txx9.c
diff --git a/drivers/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
similarity index 100%
rename from drivers/serial/sh-sci.c
rename to drivers/tty/serial/sh-sci.c
diff --git a/drivers/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
similarity index 100%
rename from drivers/serial/sh-sci.h
rename to drivers/tty/serial/sh-sci.h
diff --git a/drivers/serial/sn_console.c b/drivers/tty/serial/sn_console.c
similarity index 100%
rename from drivers/serial/sn_console.c
rename to drivers/tty/serial/sn_console.c
diff --git a/drivers/serial/suncore.c b/drivers/tty/serial/suncore.c
similarity index 100%
rename from drivers/serial/suncore.c
rename to drivers/tty/serial/suncore.c
diff --git a/drivers/serial/suncore.h b/drivers/tty/serial/suncore.h
similarity index 100%
rename from drivers/serial/suncore.h
rename to drivers/tty/serial/suncore.h
diff --git a/drivers/serial/sunhv.c b/drivers/tty/serial/sunhv.c
similarity index 100%
rename from drivers/serial/sunhv.c
rename to drivers/tty/serial/sunhv.c
diff --git a/drivers/serial/sunsab.c b/drivers/tty/serial/sunsab.c
similarity index 100%
rename from drivers/serial/sunsab.c
rename to drivers/tty/serial/sunsab.c
diff --git a/drivers/serial/sunsab.h b/drivers/tty/serial/sunsab.h
similarity index 100%
rename from drivers/serial/sunsab.h
rename to drivers/tty/serial/sunsab.h
diff --git a/drivers/serial/sunsu.c b/drivers/tty/serial/sunsu.c
similarity index 100%
rename from drivers/serial/sunsu.c
rename to drivers/tty/serial/sunsu.c
diff --git a/drivers/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
similarity index 100%
rename from drivers/serial/sunzilog.c
rename to drivers/tty/serial/sunzilog.c
diff --git a/drivers/serial/sunzilog.h b/drivers/tty/serial/sunzilog.h
similarity index 100%
rename from drivers/serial/sunzilog.h
rename to drivers/tty/serial/sunzilog.h
diff --git a/drivers/serial/timbuart.c b/drivers/tty/serial/timbuart.c
similarity index 100%
rename from drivers/serial/timbuart.c
rename to drivers/tty/serial/timbuart.c
diff --git a/drivers/serial/timbuart.h b/drivers/tty/serial/timbuart.h
similarity index 100%
rename from drivers/serial/timbuart.h
rename to drivers/tty/serial/timbuart.h
diff --git a/drivers/serial/uartlite.c b/drivers/tty/serial/uartlite.c
similarity index 100%
rename from drivers/serial/uartlite.c
rename to drivers/tty/serial/uartlite.c
diff --git a/drivers/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
similarity index 100%
rename from drivers/serial/ucc_uart.c
rename to drivers/tty/serial/ucc_uart.c
diff --git a/drivers/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
similarity index 100%
rename from drivers/serial/vr41xx_siu.c
rename to drivers/tty/serial/vr41xx_siu.c
diff --git a/drivers/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
similarity index 100%
rename from drivers/serial/vt8500_serial.c
rename to drivers/tty/serial/vt8500_serial.c
diff --git a/drivers/serial/zs.c b/drivers/tty/serial/zs.c
similarity index 100%
rename from drivers/serial/zs.c
rename to drivers/tty/serial/zs.c
diff --git a/drivers/serial/zs.h b/drivers/tty/serial/zs.h
similarity index 100%
rename from drivers/serial/zs.h
rename to drivers/tty/serial/zs.h
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index c556ed9..81f1395 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -46,7 +46,7 @@
 #include <asm/irq_regs.h>
 
 /* Whether we react on sysrq keys or just ignore them */
-static int __read_mostly sysrq_enabled = 1;
+static int __read_mostly sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
 static bool __read_mostly sysrq_always_enabled;
 
 static bool sysrq_on(void)
@@ -571,6 +571,7 @@
 	unsigned int alt_use;
 	bool active;
 	bool need_reinject;
+	bool reinjecting;
 };
 
 static void sysrq_reinject_alt_sysrq(struct work_struct *work)
@@ -581,6 +582,10 @@
 	unsigned int alt_code = sysrq->alt_use;
 
 	if (sysrq->need_reinject) {
+		/* we do not want the assignment to be reordered */
+		sysrq->reinjecting = true;
+		mb();
+
 		/* Simulate press and release of Alt + SysRq */
 		input_inject_event(handle, EV_KEY, alt_code, 1);
 		input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1);
@@ -589,6 +594,9 @@
 		input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0);
 		input_inject_event(handle, EV_KEY, alt_code, 0);
 		input_inject_event(handle, EV_SYN, SYN_REPORT, 1);
+
+		mb();
+		sysrq->reinjecting = false;
 	}
 }
 
@@ -599,6 +607,13 @@
 	bool was_active = sysrq->active;
 	bool suppress;
 
+	/*
+	 * Do not filter anything if we are in the process of re-injecting
+	 * the Alt+SysRq combination.
+	 */
+	if (sysrq->reinjecting)
+		return false;
+
 	switch (type) {
 
 	case EV_SYN:
@@ -629,7 +644,7 @@
 				sysrq->alt_use = sysrq->alt;
 				/*
 				 * If nothing else will be pressed we'll need
-				 * to * re-inject Alt-SysRq keysroke.
+				 * to re-inject the Alt-SysRq keystroke.
 				 */
 				sysrq->need_reinject = true;
 			}
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 464d09d..0065da4 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -3256,8 +3256,8 @@
 	struct console *c;
 	ssize_t count = 0;
 
-	acquire_console_sem();
-	for (c = console_drivers; c; c = c->next) {
+	console_lock();
+	for_each_console(c) {
 		if (!c->device)
 			continue;
 		if (!c->write)
@@ -3271,7 +3271,7 @@
 	while (i--)
 		count += sprintf(buf + count, "%s%d%c",
 				 cs[i]->name, cs[i]->index, i ? ' ':'\n');
-	release_console_sem();
+	console_unlock();
 
 	return count;
 }
@@ -3306,7 +3306,7 @@
 	if (IS_ERR(consdev))
 		consdev = NULL;
 	else
-		device_create_file(consdev, &dev_attr_active);
+		WARN_ON(device_create_file(consdev, &dev_attr_active) < 0);
 
 #ifdef CONFIG_VT
 	vty_init(&console_fops);
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index ebae344..c956ed6 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -316,9 +316,9 @@
 	/* always called with BTM from vt_ioctl */
 	WARN_ON(!tty_locked());
 
-	acquire_console_sem();
+	console_lock();
 	poke_blanked_console();
-	release_console_sem();
+	console_unlock();
 
 	ld = tty_ldisc_ref(tty);
 	if (!ld) {
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index eab3a1f..a672ed1 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -202,7 +202,7 @@
 	/* Select the proper current console and verify
 	 * sanity of the situation under the console lock.
 	 */
-	acquire_console_sem();
+	console_lock();
 
 	attr = (currcons & 128);
 	currcons = (currcons & 127);
@@ -336,9 +336,9 @@
 		 * the pagefault handling code may want to call printk().
 		 */
 
-		release_console_sem();
+		console_unlock();
 		ret = copy_to_user(buf, con_buf_start, orig_count);
-		acquire_console_sem();
+		console_lock();
 
 		if (ret) {
 			read += (orig_count - ret);
@@ -354,7 +354,7 @@
 	if (read)
 		ret = read;
 unlock_out:
-	release_console_sem();
+	console_unlock();
 	mutex_unlock(&con_buf_mtx);
 	return ret;
 }
@@ -379,7 +379,7 @@
 	/* Select the proper current console and verify
 	 * sanity of the situation under the console lock.
 	 */
-	acquire_console_sem();
+	console_lock();
 
 	attr = (currcons & 128);
 	currcons = (currcons & 127);
@@ -414,9 +414,9 @@
 		/* Temporarily drop the console lock so that we can read
 		 * in the write data from userspace safely.
 		 */
-		release_console_sem();
+		console_unlock();
 		ret = copy_from_user(con_buf, buf, this_round);
-		acquire_console_sem();
+		console_lock();
 
 		if (ret) {
 			this_round -= ret;
@@ -542,7 +542,7 @@
 		vcs_scr_updated(vc);
 
 unlock_out:
-	release_console_sem();
+	console_unlock();
 
 	mutex_unlock(&con_buf_mtx);
 
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 76407ec..147ede34 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1003,9 +1003,9 @@
 	struct vc_data *vc = tty->driver_data;
 	int ret;
 
-	acquire_console_sem();
+	console_lock();
 	ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row);
-	release_console_sem();
+	console_unlock();
 	return ret;
 }
 
@@ -1271,7 +1271,7 @@
 	vc->vc_color = vc->vc_def_color;
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void csi_m(struct vc_data *vc)
 {
 	int i;
@@ -1415,7 +1415,7 @@
 	return vc_cons[fg_console].d->vc_report_mouse;
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void set_mode(struct vc_data *vc, int on_off)
 {
 	int i;
@@ -1485,7 +1485,7 @@
 		}
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void setterm_command(struct vc_data *vc)
 {
 	switch(vc->vc_par[0]) {
@@ -1545,7 +1545,7 @@
 	}
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void csi_at(struct vc_data *vc, unsigned int nr)
 {
 	if (nr > vc->vc_cols - vc->vc_x)
@@ -1555,7 +1555,7 @@
 	insert_char(vc, nr);
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void csi_L(struct vc_data *vc, unsigned int nr)
 {
 	if (nr > vc->vc_rows - vc->vc_y)
@@ -1566,7 +1566,7 @@
 	vc->vc_need_wrap = 0;
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void csi_P(struct vc_data *vc, unsigned int nr)
 {
 	if (nr > vc->vc_cols - vc->vc_x)
@@ -1576,7 +1576,7 @@
 	delete_char(vc, nr);
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void csi_M(struct vc_data *vc, unsigned int nr)
 {
 	if (nr > vc->vc_rows - vc->vc_y)
@@ -1587,7 +1587,7 @@
 	vc->vc_need_wrap = 0;
 }
 
-/* console_sem is held (except via vc_init->reset_terminal */
+/* console_lock is held (except via vc_init->reset_terminal) */
 static void save_cur(struct vc_data *vc)
 {
 	vc->vc_saved_x		= vc->vc_x;
@@ -1603,7 +1603,7 @@
 	vc->vc_saved_G1		= vc->vc_G1_charset;
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void restore_cur(struct vc_data *vc)
 {
 	gotoxy(vc, vc->vc_saved_x, vc->vc_saved_y);
@@ -1625,7 +1625,7 @@
 	EShash, ESsetG0, ESsetG1, ESpercent, ESignore, ESnonstd,
 	ESpalette };
 
-/* console_sem is held (except via vc_init()) */
+/* console_lock is held (except via vc_init()) */
 static void reset_terminal(struct vc_data *vc, int do_clear)
 {
 	vc->vc_top		= 0;
@@ -1685,7 +1685,7 @@
 	    csi_J(vc, 2);
 }
 
-/* console_sem is held */
+/* console_lock is held */
 static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
 {
 	/*
@@ -2119,7 +2119,7 @@
 	return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
 }
 
-/* acquires console_sem */
+/* acquires console_lock */
 static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count)
 {
 #ifdef VT_BUF_VRAM_ONLY
@@ -2147,11 +2147,11 @@
 
 	might_sleep();
 
-	acquire_console_sem();
+	console_lock();
 	vc = tty->driver_data;
 	if (vc == NULL) {
 		printk(KERN_ERR "vt: argh, driver_data is NULL !\n");
-		release_console_sem();
+		console_unlock();
 		return 0;
 	}
 
@@ -2159,7 +2159,7 @@
 	if (!vc_cons_allocated(currcons)) {
 	    /* could this happen? */
 		printk_once("con_write: tty %d not allocated\n", currcons+1);
-	    release_console_sem();
+	    console_unlock();
 	    return 0;
 	}
 
@@ -2375,7 +2375,7 @@
 	}
 	FLUSH
 	console_conditional_schedule();
-	release_console_sem();
+	console_unlock();
 	notify_update(vc);
 	return n;
 #undef FLUSH
@@ -2388,11 +2388,11 @@
  * us to do the switches asynchronously (needed when we want
  * to switch due to a keyboard interrupt).  Synchronization
  * with other console code and prevention of re-entrancy is
- * ensured with console_sem.
+ * ensured with console_lock.
  */
 static void console_callback(struct work_struct *ignored)
 {
-	acquire_console_sem();
+	console_lock();
 
 	if (want_console >= 0) {
 		if (want_console != fg_console &&
@@ -2422,7 +2422,7 @@
 	}
 	notify_update(vc_cons[fg_console].d);
 
-	release_console_sem();
+	console_unlock();
 }
 
 int set_console(int nr)
@@ -2603,7 +2603,7 @@
  */
 
 /*
- * Generally a bit racy with respect to console_sem().
+ * Generally a bit racy with respect to console_lock().
  *
  * There are some functions which don't need it.
  *
@@ -2629,17 +2629,17 @@
 	switch (type)
 	{
 		case TIOCL_SETSEL:
-			acquire_console_sem();
+			console_lock();
 			ret = set_selection((struct tiocl_selection __user *)(p+1), tty);
-			release_console_sem();
+			console_unlock();
 			break;
 		case TIOCL_PASTESEL:
 			ret = paste_selection(tty);
 			break;
 		case TIOCL_UNBLANKSCREEN:
-			acquire_console_sem();
+			console_lock();
 			unblank_screen();
-			release_console_sem();
+			console_unlock();
 			break;
 		case TIOCL_SELLOADLUT:
 			ret = sel_loadlut(p);
@@ -2688,10 +2688,10 @@
 			}
 			break;
 		case TIOCL_BLANKSCREEN:	/* until explicitly unblanked, not only poked */
-			acquire_console_sem();
+			console_lock();
 			ignore_poke = 1;
 			do_blank_screen(0);
-			release_console_sem();
+			console_unlock();
 			break;
 		case TIOCL_BLANKEDSCREEN:
 			ret = console_blanked;
@@ -2790,11 +2790,11 @@
 		return;
 
 	/* if we race with con_close(), vt may be null */
-	acquire_console_sem();
+	console_lock();
 	vc = tty->driver_data;
 	if (vc)
 		set_cursor(vc);
-	release_console_sem();
+	console_unlock();
 }
 
 /*
@@ -2805,7 +2805,7 @@
 	unsigned int currcons = tty->index;
 	int ret = 0;
 
-	acquire_console_sem();
+	console_lock();
 	if (tty->driver_data == NULL) {
 		ret = vc_allocate(currcons);
 		if (ret == 0) {
@@ -2813,7 +2813,7 @@
 
 			/* Still being freed */
 			if (vc->port.tty) {
-				release_console_sem();
+				console_unlock();
 				return -ERESTARTSYS;
 			}
 			tty->driver_data = vc;
@@ -2827,11 +2827,11 @@
 				tty->termios->c_iflag |= IUTF8;
 			else
 				tty->termios->c_iflag &= ~IUTF8;
-			release_console_sem();
+			console_unlock();
 			return ret;
 		}
 	}
-	release_console_sem();
+	console_unlock();
 	return ret;
 }
 
@@ -2844,9 +2844,9 @@
 {
 	struct vc_data *vc = tty->driver_data;
 	BUG_ON(vc == NULL);
-	acquire_console_sem();
+	console_lock();
 	vc->port.tty = NULL;
-	release_console_sem();
+	console_unlock();
 	tty_shutdown(tty);
 }
 
@@ -2893,13 +2893,13 @@
 	struct vc_data *vc;
 	unsigned int currcons = 0, i;
 
-	acquire_console_sem();
+	console_lock();
 
 	if (conswitchp)
 		display_desc = conswitchp->con_startup();
 	if (!display_desc) {
 		fg_console = 0;
-		release_console_sem();
+		console_unlock();
 		return 0;
 	}
 
@@ -2946,7 +2946,7 @@
 	printable = 1;
 	printk("\n");
 
-	release_console_sem();
+	console_unlock();
 
 #ifdef CONFIG_VT_CONSOLE
 	register_console(&vt_console_driver);
@@ -2994,7 +2994,7 @@
 	if (IS_ERR(tty0dev))
 		tty0dev = NULL;
 	else
-		device_create_file(tty0dev, &dev_attr_active);
+		WARN_ON(device_create_file(tty0dev, &dev_attr_active) < 0);
 
 	vcs_init();
 
@@ -3037,7 +3037,7 @@
 	if (!try_module_get(owner))
 		return -ENODEV;
 
-	acquire_console_sem();
+	console_lock();
 
 	/* check if driver is registered */
 	for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3122,7 +3122,7 @@
 
 	retval = 0;
 err:
-	release_console_sem();
+	console_unlock();
 	module_put(owner);
 	return retval;
 };
@@ -3171,7 +3171,7 @@
 	if (!try_module_get(owner))
 		return -ENODEV;
 
-	acquire_console_sem();
+	console_lock();
 
 	/* check if driver is registered and if it is unbindable */
 	for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3185,7 +3185,7 @@
 	}
 
 	if (retval) {
-		release_console_sem();
+		console_unlock();
 		goto err;
 	}
 
@@ -3204,12 +3204,12 @@
 	}
 
 	if (retval) {
-		release_console_sem();
+		console_unlock();
 		goto err;
 	}
 
 	if (!con_is_bound(csw)) {
-		release_console_sem();
+		console_unlock();
 		goto err;
 	}
 
@@ -3238,7 +3238,7 @@
 	if (!con_is_bound(csw))
 		con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
 
-	release_console_sem();
+	console_unlock();
 	/* ignore return value, binding should not fail */
 	bind_con_driver(defcsw, first, last, deflt);
 err:
@@ -3538,14 +3538,14 @@
 	if (!try_module_get(owner))
 		return -ENODEV;
 
-	acquire_console_sem();
+	console_lock();
 
 	for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
 		con_driver = &registered_con_driver[i];
 
 		/* already registered */
 		if (con_driver->con == csw)
-			retval = -EINVAL;
+			retval = -EBUSY;
 	}
 
 	if (retval)
@@ -3592,7 +3592,7 @@
 	}
 
 err:
-	release_console_sem();
+	console_unlock();
 	module_put(owner);
 	return retval;
 }
@@ -3613,7 +3613,7 @@
 {
 	int i, retval = -ENODEV;
 
-	acquire_console_sem();
+	console_lock();
 
 	/* cannot unregister a bound driver */
 	if (con_is_bound(csw))
@@ -3639,7 +3639,7 @@
 		}
 	}
 err:
-	release_console_sem();
+	console_unlock();
 	return retval;
 }
 EXPORT_SYMBOL(unregister_con_driver);
@@ -3656,7 +3656,12 @@
 	int err;
 
 	err = register_con_driver(csw, first, last);
-
+	/* if we get a busy error we still want to bind the console driver
+	 * and return success, as we may have unbound the console driver
+	 * but not unregistered it.
+	 */
+	if (err == -EBUSY)
+		err = 0;
 	if (!err)
 		bind_con_driver(csw, first, last, deflt);
 
@@ -3934,9 +3939,9 @@
 {
 	int rc;
 
-	acquire_console_sem();
+	console_lock();
 	rc = set_get_cmap (arg,1);
-	release_console_sem();
+	console_unlock();
 
 	return rc;
 }
@@ -3945,9 +3950,9 @@
 {
 	int rc;
 
-	acquire_console_sem();
+	console_lock();
 	rc = set_get_cmap (arg,0);
-	release_console_sem();
+	console_unlock();
 
 	return rc;
 }
@@ -3994,12 +3999,12 @@
 	} else
 		font.data = NULL;
 
-	acquire_console_sem();
+	console_lock();
 	if (vc->vc_sw->con_font_get)
 		rc = vc->vc_sw->con_font_get(vc, &font);
 	else
 		rc = -ENOSYS;
-	release_console_sem();
+	console_unlock();
 
 	if (rc)
 		goto out;
@@ -4076,12 +4081,12 @@
 	font.data = memdup_user(op->data, size);
 	if (IS_ERR(font.data))
 		return PTR_ERR(font.data);
-	acquire_console_sem();
+	console_lock();
 	if (vc->vc_sw->con_font_set)
 		rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
 	else
 		rc = -ENOSYS;
-	release_console_sem();
+	console_unlock();
 	kfree(font.data);
 	return rc;
 }
@@ -4103,12 +4108,12 @@
 	else
 		name[MAX_FONT_NAME - 1] = 0;
 
-	acquire_console_sem();
+	console_lock();
 	if (vc->vc_sw->con_font_default)
 		rc = vc->vc_sw->con_font_default(vc, &font, s);
 	else
 		rc = -ENOSYS;
-	release_console_sem();
+	console_unlock();
 	if (!rc) {
 		op->width = font.width;
 		op->height = font.height;
@@ -4124,7 +4129,7 @@
 	if (vc->vc_mode != KD_TEXT)
 		return -EINVAL;
 
-	acquire_console_sem();
+	console_lock();
 	if (!vc->vc_sw->con_font_copy)
 		rc = -ENOSYS;
 	else if (con < 0 || !vc_cons_allocated(con))
@@ -4133,7 +4138,7 @@
 		rc = 0;
 	else
 		rc = vc->vc_sw->con_font_copy(vc, con);
-	release_console_sem();
+	console_unlock();
 	return rc;
 }
 
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 6b68a0f..1235ebd 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -649,12 +649,12 @@
 		/*
 		 * explicitly blank/unblank the screen if switching modes
 		 */
-		acquire_console_sem();
+		console_lock();
 		if (arg == KD_TEXT)
 			do_unblank_screen(1);
 		else
 			do_blank_screen(1);
-		release_console_sem();
+		console_unlock();
 		break;
 
 	case KDGETMODE:
@@ -893,7 +893,7 @@
 			ret = -EINVAL;
 			goto out;
 		}
-		acquire_console_sem();
+		console_lock();
 		vc->vt_mode = tmp;
 		/* the frsig is ignored, so we set it to 0 */
 		vc->vt_mode.frsig = 0;
@@ -901,7 +901,7 @@
 		vc->vt_pid = get_pid(task_pid(current));
 		/* no switch is required -- saw@shade.msu.ru */
 		vc->vt_newvt = -1;
-		release_console_sem();
+		console_unlock();
 		break;
 	}
 
@@ -910,9 +910,9 @@
 		struct vt_mode tmp;
 		int rc;
 
-		acquire_console_sem();
+		console_lock();
 		memcpy(&tmp, &vc->vt_mode, sizeof(struct vt_mode));
-		release_console_sem();
+		console_unlock();
 
 		rc = copy_to_user(up, &tmp, sizeof(struct vt_mode));
 		if (rc)
@@ -965,9 +965,9 @@
 			ret =  -ENXIO;
 		else {
 			arg--;
-			acquire_console_sem();
+			console_lock();
 			ret = vc_allocate(arg);
-			release_console_sem();
+			console_unlock();
 			if (ret)
 				break;
 			set_console(arg);
@@ -990,7 +990,7 @@
 			ret = -ENXIO;
 		else {
 			vsa.console--;
-			acquire_console_sem();
+			console_lock();
 			ret = vc_allocate(vsa.console);
 			if (ret == 0) {
 				struct vc_data *nvc;
@@ -1003,7 +1003,7 @@
 				put_pid(nvc->vt_pid);
 				nvc->vt_pid = get_pid(task_pid(current));
 			}
-			release_console_sem();
+			console_unlock();
 			if (ret)
 				break;
 			/* Commence switch and lock */
@@ -1044,7 +1044,7 @@
 		/*
 		 * Switching-from response
 		 */
-		acquire_console_sem();
+		console_lock();
 		if (vc->vt_newvt >= 0) {
 			if (arg == 0)
 				/*
@@ -1063,7 +1063,7 @@
 				vc->vt_newvt = -1;
 				ret = vc_allocate(newvt);
 				if (ret) {
-					release_console_sem();
+					console_unlock();
 					break;
 				}
 				/*
@@ -1083,7 +1083,7 @@
 			if (arg != VT_ACKACQ)
 				ret = -EINVAL;
 		}
-		release_console_sem();
+		console_unlock();
 		break;
 
 	 /*
@@ -1096,20 +1096,20 @@
 		}
 		if (arg == 0) {
 		    /* deallocate all unused consoles, but leave 0 */
-			acquire_console_sem();
+			console_lock();
 			for (i=1; i<MAX_NR_CONSOLES; i++)
 				if (! VT_BUSY(i))
 					vc_deallocate(i);
-			release_console_sem();
+			console_unlock();
 		} else {
 			/* deallocate a single console, if possible */
 			arg--;
 			if (VT_BUSY(arg))
 				ret = -EBUSY;
 			else if (arg) {			      /* leave 0 */
-				acquire_console_sem();
+				console_lock();
 				vc_deallocate(arg);
-				release_console_sem();
+				console_unlock();
 			}
 		}
 		break;
@@ -1126,7 +1126,7 @@
 		    get_user(cc, &vtsizes->v_cols))
 			ret = -EFAULT;
 		else {
-			acquire_console_sem();
+			console_lock();
 			for (i = 0; i < MAX_NR_CONSOLES; i++) {
 				vc = vc_cons[i].d;
 
@@ -1135,7 +1135,7 @@
 					vc_resize(vc_cons[i].d, cc, ll);
 				}
 			}
-			release_console_sem();
+			console_unlock();
 		}
 		break;
 	}
@@ -1187,14 +1187,14 @@
 		for (i = 0; i < MAX_NR_CONSOLES; i++) {
 			if (!vc_cons[i].d)
 				continue;
-			acquire_console_sem();
+			console_lock();
 			if (vlin)
 				vc_cons[i].d->vc_scan_lines = vlin;
 			if (clin)
 				vc_cons[i].d->vc_font.height = clin;
 			vc_cons[i].d->vc_resize_user = 1;
 			vc_resize(vc_cons[i].d, cc, ll);
-			release_console_sem();
+			console_unlock();
 		}
 		break;
 	}
@@ -1367,7 +1367,7 @@
 	struct vc_data *vc;
 	struct tty_struct *tty;
 
-	acquire_console_sem();
+	console_lock();
 	vc = vc_con->d;
 	if (vc) {
 		tty = vc->port.tty;
@@ -1379,7 +1379,7 @@
 			__do_SAK(tty);
 		reset_vc(vc);
 	}
-	release_console_sem();
+	console_unlock();
 }
 
 #ifdef CONFIG_COMPAT
@@ -1737,10 +1737,10 @@
 {
 	int prev;
 
-	acquire_console_sem();
+	console_lock();
 	/* Graphics mode - up to X */
 	if (disable_vt_switch) {
-		release_console_sem();
+		console_unlock();
 		return 0;
 	}
 	prev = fg_console;
@@ -1748,7 +1748,7 @@
 	if (alloc && vc_allocate(vt)) {
 		/* we can't have a free VC for now. Too bad,
 		 * we don't want to mess the screen for now. */
-		release_console_sem();
+		console_unlock();
 		return -ENOSPC;
 	}
 
@@ -1758,10 +1758,10 @@
 		 * Let the calling function know so it can decide
 		 * what to do.
 		 */
-		release_console_sem();
+		console_unlock();
 		return -EIO;
 	}
-	release_console_sem();
+	console_unlock();
 	tty_lock();
 	if (vt_waitactive(vt + 1)) {
 		pr_debug("Suspend: Can't switch VCs.");
@@ -1781,8 +1781,8 @@
  */
 void pm_set_vt_switch(int do_switch)
 {
-	acquire_console_sem();
+	console_lock();
 	disable_vt_switch = !do_switch;
-	release_console_sem();
+	console_unlock();
 }
 EXPORT_SYMBOL(pm_set_vt_switch);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index d6ede98..4ab49d4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1607,6 +1607,7 @@
 	{ NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
 	{ NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
 	{ NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
+	{ NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
 	{ SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
 
 	/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 6ee4451..47085e5 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -342,7 +342,7 @@
 		goto outnp;
 	}
 
-	if (!file->f_flags && O_NONBLOCK)
+	if (!(file->f_flags & O_NONBLOCK))
 		r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
 								&desc->flags));
 	else
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index bcc2477..18d02e3 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -123,9 +123,9 @@
 
 config USB_OTG_WHITELIST
 	bool "Rely on OTG Targeted Peripherals List"
-	depends on USB_OTG || EMBEDDED
+	depends on USB_OTG || EXPERT
 	default y if USB_OTG
-	default n if EMBEDDED
+	default n if EXPERT
 	help
 	  If you say Y here, the "otg_whitelist.h" file will be used as a
 	  product whitelist, so USB peripherals not listed there will be
@@ -141,7 +141,7 @@
 
 config USB_OTG_BLACKLIST_HUB
 	bool "Disable external hubs"
-	depends on USB_OTG || EMBEDDED
+	depends on USB_OTG || EXPERT
 	help
 	  If you say Y here, then Linux will refuse to enumerate
 	  external hubs.  OTG hosts are allowed to reduce hardware
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 9da2505..df502a9 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -192,12 +192,12 @@
 	ep_dev->dev.parent = parent;
 	ep_dev->dev.release = ep_device_release;
 	dev_set_name(&ep_dev->dev, "ep_%02x", endpoint->desc.bEndpointAddress);
-	device_enable_async_suspend(&ep_dev->dev);
 
 	retval = device_register(&ep_dev->dev);
 	if (retval)
 		goto error_register;
 
+	device_enable_async_suspend(&ep_dev->dev);
 	endpoint->ep_dev = ep_dev;
 	return retval;
 
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index b55d460..f71e8e3 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -405,7 +405,12 @@
 			return retval;
 	}
 
-	synchronize_irq(pci_dev->irq);
+	/* If MSI-X is enabled, the driver will have synchronized all vectors
+	 * in pci_suspend(). If MSI or legacy PCI is enabled, that will be
+	 * synchronized here.
+	 */
+	if (!hcd->msix_enabled)
+		synchronize_irq(pci_dev->irq);
 
 	/* Downstream ports from this root hub should already be quiesced, so
 	 * there will be no DMA activity.  Now we can shut down the upstream
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 6a95017..e935f71 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1955,7 +1955,6 @@
 
 	dev_dbg(&rhdev->dev, "usb %s%s\n",
 			(msg.event & PM_EVENT_AUTO ? "auto-" : ""), "resume");
-	clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
 	if (!hcd->driver->bus_resume)
 		return -ENOENT;
 	if (hcd->state == HC_STATE_RUNNING)
@@ -1963,6 +1962,7 @@
 
 	hcd->state = HC_STATE_RESUMING;
 	status = hcd->driver->bus_resume(hcd);
+	clear_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
 	if (status == 0) {
 		/* TRSMRCY = 10 msec */
 		msleep(10);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b98efae..d041c68 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -676,6 +676,8 @@
 static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
 {
 	struct usb_device *hdev = hub->hdev;
+	struct usb_hcd *hcd;
+	int ret;
 	int port1;
 	int status;
 	bool need_debounce_delay = false;
@@ -714,6 +716,25 @@
 			usb_autopm_get_interface_no_resume(
 					to_usb_interface(hub->intfdev));
 			return;		/* Continues at init2: below */
+		} else if (type == HUB_RESET_RESUME) {
+			/* The internal host controller state for the hub device
+			 * may be gone after a host power loss on system resume.
+			 * Update the device's info so the HW knows it's a hub.
+			 */
+			hcd = bus_to_hcd(hdev->bus);
+			if (hcd->driver->update_hub_device) {
+				ret = hcd->driver->update_hub_device(hcd, hdev,
+						&hub->tt, GFP_NOIO);
+				if (ret < 0) {
+					dev_err(hub->intfdev, "Host not "
+							"accepting hub info "
+							"update.\n");
+					dev_err(hub->intfdev, "LS/FS devices "
+							"and hubs may not work "
+							"under this hub.\n");
+				}
+			}
+			hub_power_on(hub, true);
 		} else {
 			hub_power_on(hub, true);
 		}
@@ -2732,6 +2753,11 @@
 		udev->ttport = hdev->ttport;
 	} else if (udev->speed != USB_SPEED_HIGH
 			&& hdev->speed == USB_SPEED_HIGH) {
+		if (!hub->tt.hub) {
+			dev_err(&udev->dev, "parent hub has no TT\n");
+			retval = -EINVAL;
+			goto fail;
+		}
 		udev->tt = &hub->tt;
 		udev->ttport = port1;
 	}
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 1dc9739..d500996 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -509,7 +509,7 @@
 	select USB_GADGET_SELECTED
 
 config USB_GADGET_EG20T
-	boolean "Intel EG20T(Topcliff) USB Device controller"
+	boolean "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH UDC"
 	depends on PCI
 	select USB_GADGET_DUALSPEED
 	help
@@ -525,6 +525,11 @@
 	  This driver does not support interrupt transfer or isochronous
 	  transfer modes.
 
+	  This driver can also be used for OKI SEMICONDUCTOR's ML7213, which is
+	  intended for IVI (In-Vehicle Infotainment) use.
+	  ML7213 is a companion chip for the Intel Atom E6xx series and is
+	  fully compatible with the Intel EG20T PCH.
+
 config USB_EG20T
 	tristate
 	depends on USB_GADGET_EG20T
@@ -541,6 +546,8 @@
 	  ci13xxx_udc core.
 	  This driver depends on OTG driver for PHY initialization,
 	  clock management, powering up VBUS, and power management.
+	  This driver is not supported on boards like trout, which
+	  have an external PHY.
 
 	  Say "y" to link the driver statically, or "m" to build a
 	  dynamically linked module called "ci13xxx_msm" and force all
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 31656a2..a1c67ae 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -76,10 +76,21 @@
 
 /* control endpoint description */
 static const struct usb_endpoint_descriptor
-ctrl_endpt_desc = {
+ctrl_endpt_out_desc = {
 	.bLength         = USB_DT_ENDPOINT_SIZE,
 	.bDescriptorType = USB_DT_ENDPOINT,
 
+	.bEndpointAddress = USB_DIR_OUT,
+	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
+	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
+};
+
+static const struct usb_endpoint_descriptor
+ctrl_endpt_in_desc = {
+	.bLength         = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType = USB_DT_ENDPOINT,
+
+	.bEndpointAddress = USB_DIR_IN,
 	.bmAttributes    = USB_ENDPOINT_XFER_CONTROL,
 	.wMaxPacketSize  = cpu_to_le16(CTRL_PAYLOAD_MAX),
 };
@@ -265,10 +276,10 @@
 	hw_bank.size /= sizeof(u32);
 
 	reg = hw_aread(ABS_DCCPARAMS, DCCPARAMS_DEN) >> ffs_nr(DCCPARAMS_DEN);
-	if (reg == 0 || reg > ENDPT_MAX)
-		return -ENODEV;
+	hw_ep_max = reg * 2;   /* cache hw ENDPT_MAX */
 
-	hw_ep_max = reg;   /* cache hw ENDPT_MAX */
+	if (hw_ep_max == 0 || hw_ep_max > ENDPT_MAX)
+		return -ENODEV;
 
 	/* setup lock mode ? */
 
@@ -1197,16 +1208,17 @@
 	}
 
 	spin_lock_irqsave(udc->lock, flags);
-	for (i = 0; i < hw_ep_max; i++) {
-		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+	for (i = 0; i < hw_ep_max/2; i++) {
+		struct ci13xxx_ep *mEpRx = &udc->ci13xxx_ep[i];
+		struct ci13xxx_ep *mEpTx = &udc->ci13xxx_ep[i + hw_ep_max/2];
 		n += scnprintf(buf + n, PAGE_SIZE - n,
 			       "EP=%02i: RX=%08X TX=%08X\n",
-			       i, (u32)mEp->qh[RX].dma, (u32)mEp->qh[TX].dma);
+			       i, (u32)mEpRx->qh.dma, (u32)mEpTx->qh.dma);
 		for (j = 0; j < (sizeof(struct ci13xxx_qh)/sizeof(u32)); j++) {
 			n += scnprintf(buf + n, PAGE_SIZE - n,
 				       " %04X:    %08X    %08X\n", j,
-				       *((u32 *)mEp->qh[RX].ptr + j),
-				       *((u32 *)mEp->qh[TX].ptr + j));
+				       *((u32 *)mEpRx->qh.ptr + j),
+				       *((u32 *)mEpTx->qh.ptr + j));
 		}
 	}
 	spin_unlock_irqrestore(udc->lock, flags);
@@ -1293,7 +1305,7 @@
 	unsigned long flags;
 	struct list_head   *ptr = NULL;
 	struct ci13xxx_req *req = NULL;
-	unsigned i, j, k, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
+	unsigned i, j, n = 0, qSize = sizeof(struct ci13xxx_td)/sizeof(u32);
 
 	dbg_trace("[%s] %p\n", __func__, buf);
 	if (attr == NULL || buf == NULL) {
@@ -1303,22 +1315,20 @@
 
 	spin_lock_irqsave(udc->lock, flags);
 	for (i = 0; i < hw_ep_max; i++)
-		for (k = RX; k <= TX; k++)
-			list_for_each(ptr, &udc->ci13xxx_ep[i].qh[k].queue)
-			{
-				req = list_entry(ptr,
-						 struct ci13xxx_req, queue);
+		list_for_each(ptr, &udc->ci13xxx_ep[i].qh.queue)
+		{
+			req = list_entry(ptr, struct ci13xxx_req, queue);
 
+			n += scnprintf(buf + n, PAGE_SIZE - n,
+					"EP=%02i: TD=%08X %s\n",
+					i % hw_ep_max/2, (u32)req->dma,
+					((i < hw_ep_max/2) ? "RX" : "TX"));
+
+			for (j = 0; j < qSize; j++)
 				n += scnprintf(buf + n, PAGE_SIZE - n,
-					       "EP=%02i: TD=%08X %s\n",
-					       i, (u32)req->dma,
-					       ((k == RX) ? "RX" : "TX"));
-
-				for (j = 0; j < qSize; j++)
-					n += scnprintf(buf + n, PAGE_SIZE - n,
-						       " %04X:    %08X\n", j,
-						       *((u32 *)req->ptr + j));
-			}
+						" %04X:    %08X\n", j,
+						*((u32 *)req->ptr + j));
+		}
 	spin_unlock_irqrestore(udc->lock, flags);
 
 	return n;
@@ -1467,12 +1477,12 @@
 	 *  At this point it's guaranteed exclusive access to qhead
 	 *  (endpt is not primed) so it's no need to use tripwire
 	 */
-	mEp->qh[mEp->dir].ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */
-	mEp->qh[mEp->dir].ptr->td.token &= ~TD_STATUS;   /* clear status */
+	mEp->qh.ptr->td.next   = mReq->dma;    /* TERMINATE = 0 */
+	mEp->qh.ptr->td.token &= ~TD_STATUS;   /* clear status */
 	if (mReq->req.zero == 0)
-		mEp->qh[mEp->dir].ptr->cap |=  QH_ZLT;
+		mEp->qh.ptr->cap |=  QH_ZLT;
 	else
-		mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT;
+		mEp->qh.ptr->cap &= ~QH_ZLT;
 
 	wmb();   /* synchronize before ep prime */
 
@@ -1542,11 +1552,11 @@
 
 	hw_ep_flush(mEp->num, mEp->dir);
 
-	while (!list_empty(&mEp->qh[mEp->dir].queue)) {
+	while (!list_empty(&mEp->qh.queue)) {
 
 		/* pop oldest request */
 		struct ci13xxx_req *mReq = \
-			list_entry(mEp->qh[mEp->dir].queue.next,
+			list_entry(mEp->qh.queue.next,
 				   struct ci13xxx_req, queue);
 		list_del_init(&mReq->queue);
 		mReq->req.status = -ESHUTDOWN;
@@ -1571,8 +1581,6 @@
 {
 	struct usb_ep *ep;
 	struct ci13xxx    *udc = container_of(gadget, struct ci13xxx, gadget);
-	struct ci13xxx_ep *mEp = container_of(gadget->ep0,
-					      struct ci13xxx_ep, ep);
 
 	trace("%p", gadget);
 
@@ -1583,7 +1591,8 @@
 	gadget_for_each_ep(ep, gadget) {
 		usb_ep_fifo_flush(ep);
 	}
-	usb_ep_fifo_flush(gadget->ep0);
+	usb_ep_fifo_flush(&udc->ep0out.ep);
+	usb_ep_fifo_flush(&udc->ep0in.ep);
 
 	udc->driver->disconnect(gadget);
 
@@ -1591,11 +1600,12 @@
 	gadget_for_each_ep(ep, gadget) {
 		usb_ep_disable(ep);
 	}
-	usb_ep_disable(gadget->ep0);
+	usb_ep_disable(&udc->ep0out.ep);
+	usb_ep_disable(&udc->ep0in.ep);
 
-	if (mEp->status != NULL) {
-		usb_ep_free_request(gadget->ep0, mEp->status);
-		mEp->status = NULL;
+	if (udc->status != NULL) {
+		usb_ep_free_request(&udc->ep0in.ep, udc->status);
+		udc->status = NULL;
 	}
 
 	return 0;
@@ -1614,7 +1624,6 @@
 __releases(udc->lock)
 __acquires(udc->lock)
 {
-	struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[0];
 	int retval;
 
 	trace("%p", udc);
@@ -1635,11 +1644,15 @@
 	if (retval)
 		goto done;
 
-	retval = usb_ep_enable(&mEp->ep, &ctrl_endpt_desc);
+	retval = usb_ep_enable(&udc->ep0out.ep, &ctrl_endpt_out_desc);
+	if (retval)
+		goto done;
+
+	retval = usb_ep_enable(&udc->ep0in.ep, &ctrl_endpt_in_desc);
 	if (!retval) {
-		mEp->status = usb_ep_alloc_request(&mEp->ep, GFP_ATOMIC);
-		if (mEp->status == NULL) {
-			usb_ep_disable(&mEp->ep);
+		udc->status = usb_ep_alloc_request(&udc->ep0in.ep, GFP_ATOMIC);
+		if (udc->status == NULL) {
+			usb_ep_disable(&udc->ep0out.ep);
 			retval = -ENOMEM;
 		}
 	}
@@ -1672,16 +1685,17 @@
 
 /**
  * isr_get_status_response: get_status request response
- * @ep:    endpoint
+ * @udc: udc struct
  * @setup: setup request packet
  *
  * This function returns an error code
  */
-static int isr_get_status_response(struct ci13xxx_ep *mEp,
+static int isr_get_status_response(struct ci13xxx *udc,
 				   struct usb_ctrlrequest *setup)
 __releases(mEp->lock)
 __acquires(mEp->lock)
 {
+	struct ci13xxx_ep *mEp = &udc->ep0in;
 	struct usb_request *req = NULL;
 	gfp_t gfp_flags = GFP_ATOMIC;
 	int dir, num, retval;
@@ -1736,27 +1750,23 @@
 
 /**
  * isr_setup_status_phase: queues the status phase of a setup transaction
- * @mEp: endpoint
+ * @udc: udc struct
  *
  * This function returns an error code
  */
-static int isr_setup_status_phase(struct ci13xxx_ep *mEp)
+static int isr_setup_status_phase(struct ci13xxx *udc)
 __releases(mEp->lock)
 __acquires(mEp->lock)
 {
 	int retval;
+	struct ci13xxx_ep *mEp;
 
-	trace("%p", mEp);
+	trace("%p", udc);
 
-	/* mEp is always valid & configured */
-
-	if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-		mEp->dir = (mEp->dir == TX) ? RX : TX;
-
-	mEp->status->no_interrupt = 1;
+	mEp = (udc->ep0_dir == TX) ? &udc->ep0out : &udc->ep0in;
 
 	spin_unlock(mEp->lock);
-	retval = usb_ep_queue(&mEp->ep, mEp->status, GFP_ATOMIC);
+	retval = usb_ep_queue(&mEp->ep, udc->status, GFP_ATOMIC);
 	spin_lock(mEp->lock);
 
 	return retval;
@@ -1778,11 +1788,11 @@
 
 	trace("%p", mEp);
 
-	if (list_empty(&mEp->qh[mEp->dir].queue))
+	if (list_empty(&mEp->qh.queue))
 		return -EINVAL;
 
 	/* pop oldest request */
-	mReq = list_entry(mEp->qh[mEp->dir].queue.next,
+	mReq = list_entry(mEp->qh.queue.next,
 			  struct ci13xxx_req, queue);
 	list_del_init(&mReq->queue);
 
@@ -1794,10 +1804,10 @@
 
 	dbg_done(_usb_addr(mEp), mReq->ptr->token, retval);
 
-	if (!list_empty(&mEp->qh[mEp->dir].queue)) {
+	if (!list_empty(&mEp->qh.queue)) {
 		struct ci13xxx_req* mReqEnq;
 
-		mReqEnq = list_entry(mEp->qh[mEp->dir].queue.next,
+		mReqEnq = list_entry(mEp->qh.queue.next,
 				  struct ci13xxx_req, queue);
 		_hardware_enqueue(mEp, mReqEnq);
 	}
@@ -1836,16 +1846,14 @@
 		int type, num, err = -EINVAL;
 		struct usb_ctrlrequest req;
 
-
 		if (mEp->desc == NULL)
 			continue;   /* not configured */
 
-		if ((mEp->dir == RX && hw_test_and_clear_complete(i)) ||
-		    (mEp->dir == TX && hw_test_and_clear_complete(i + 16))) {
+		if (hw_test_and_clear_complete(i)) {
 			err = isr_tr_complete_low(mEp);
 			if (mEp->type == USB_ENDPOINT_XFER_CONTROL) {
 				if (err > 0)   /* needs status phase */
-					err = isr_setup_status_phase(mEp);
+					err = isr_setup_status_phase(udc);
 				if (err < 0) {
 					dbg_event(_usb_addr(mEp),
 						  "ERROR", err);
@@ -1866,15 +1874,22 @@
 			continue;
 		}
 
+		/*
+		 * Flush data and handshake transactions of previous
+		 * setup packet.
+		 */
+		_ep_nuke(&udc->ep0out);
+		_ep_nuke(&udc->ep0in);
+
 		/* read_setup_packet */
 		do {
 			hw_test_and_set_setup_guard();
-			memcpy(&req, &mEp->qh[RX].ptr->setup, sizeof(req));
+			memcpy(&req, &mEp->qh.ptr->setup, sizeof(req));
 		} while (!hw_test_and_clear_setup_guard());
 
 		type = req.bRequestType;
 
-		mEp->dir = (type & USB_DIR_IN) ? TX : RX;
+		udc->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
 
 		dbg_setup(_usb_addr(mEp), &req);
 
@@ -1895,7 +1910,7 @@
 				if (err)
 					break;
 			}
-			err = isr_setup_status_phase(mEp);
+			err = isr_setup_status_phase(udc);
 			break;
 		case USB_REQ_GET_STATUS:
 			if (type != (USB_DIR_IN|USB_RECIP_DEVICE)   &&
@@ -1905,7 +1920,7 @@
 			if (le16_to_cpu(req.wLength) != 2 ||
 			    le16_to_cpu(req.wValue)  != 0)
 				break;
-			err = isr_get_status_response(mEp, &req);
+			err = isr_get_status_response(udc, &req);
 			break;
 		case USB_REQ_SET_ADDRESS:
 			if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
@@ -1916,7 +1931,7 @@
 			err = hw_usb_set_address((u8)le16_to_cpu(req.wValue));
 			if (err)
 				break;
-			err = isr_setup_status_phase(mEp);
+			err = isr_setup_status_phase(udc);
 			break;
 		case USB_REQ_SET_FEATURE:
 			if (type != (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
@@ -1932,12 +1947,12 @@
 			spin_lock(udc->lock);
 			if (err)
 				break;
-			err = isr_setup_status_phase(mEp);
+			err = isr_setup_status_phase(udc);
 			break;
 		default:
 delegate:
 			if (req.wLength == 0)   /* no data phase */
-				mEp->dir = TX;
+				udc->ep0_dir = TX;
 
 			spin_unlock(udc->lock);
 			err = udc->driver->setup(&udc->gadget, &req);
@@ -1968,7 +1983,7 @@
 		     const struct usb_endpoint_descriptor *desc)
 {
 	struct ci13xxx_ep *mEp = container_of(ep, struct ci13xxx_ep, ep);
-	int direction, retval = 0;
+	int retval = 0;
 	unsigned long flags;
 
 	trace("%p, %p", ep, desc);
@@ -1982,7 +1997,7 @@
 
 	mEp->desc = desc;
 
-	if (!list_empty(&mEp->qh[mEp->dir].queue))
+	if (!list_empty(&mEp->qh.queue))
 		warn("enabling a non-empty endpoint!");
 
 	mEp->dir  = usb_endpoint_dir_in(desc) ? TX : RX;
@@ -1991,29 +2006,22 @@
 
 	mEp->ep.maxpacket = __constant_le16_to_cpu(desc->wMaxPacketSize);
 
-	direction = mEp->dir;
-	do {
-		dbg_event(_usb_addr(mEp), "ENABLE", 0);
+	dbg_event(_usb_addr(mEp), "ENABLE", 0);
 
-		mEp->qh[mEp->dir].ptr->cap = 0;
+	mEp->qh.ptr->cap = 0;
 
-		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-			mEp->qh[mEp->dir].ptr->cap |=  QH_IOS;
-		else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
-			mEp->qh[mEp->dir].ptr->cap &= ~QH_MULT;
-		else
-			mEp->qh[mEp->dir].ptr->cap &= ~QH_ZLT;
+	if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
+		mEp->qh.ptr->cap |=  QH_IOS;
+	else if (mEp->type == USB_ENDPOINT_XFER_ISOC)
+		mEp->qh.ptr->cap &= ~QH_MULT;
+	else
+		mEp->qh.ptr->cap &= ~QH_ZLT;
 
-		mEp->qh[mEp->dir].ptr->cap |=
-			(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
-		mEp->qh[mEp->dir].ptr->td.next |= TD_TERMINATE;   /* needed? */
+	mEp->qh.ptr->cap |=
+		(mEp->ep.maxpacket << ffs_nr(QH_MAX_PKT)) & QH_MAX_PKT;
+	mEp->qh.ptr->td.next |= TD_TERMINATE;   /* needed? */
 
-		retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
-
-		if (mEp->type == USB_ENDPOINT_XFER_CONTROL)
-			mEp->dir = (mEp->dir == TX) ? RX : TX;
-
-	} while (mEp->dir != direction);
+	retval |= hw_ep_enable(mEp->num, mEp->dir, mEp->type);
 
 	spin_unlock_irqrestore(mEp->lock, flags);
 	return retval;
@@ -2146,7 +2154,7 @@
 	spin_lock_irqsave(mEp->lock, flags);
 
 	if (mEp->type == USB_ENDPOINT_XFER_CONTROL &&
-	    !list_empty(&mEp->qh[mEp->dir].queue)) {
+	    !list_empty(&mEp->qh.queue)) {
 		_ep_nuke(mEp);
 		retval = -EOVERFLOW;
 		warn("endpoint ctrl %X nuked", _usb_addr(mEp));
@@ -2170,9 +2178,9 @@
 	/* push request */
 	mReq->req.status = -EINPROGRESS;
 	mReq->req.actual = 0;
-	list_add_tail(&mReq->queue, &mEp->qh[mEp->dir].queue);
+	list_add_tail(&mReq->queue, &mEp->qh.queue);
 
-	if (list_is_singular(&mEp->qh[mEp->dir].queue))
+	if (list_is_singular(&mEp->qh.queue))
 		retval = _hardware_enqueue(mEp, mReq);
 
 	if (retval == -EALREADY) {
@@ -2199,7 +2207,7 @@
 	trace("%p, %p", ep, req);
 
 	if (ep == NULL || req == NULL || mEp->desc == NULL ||
-	    list_empty(&mReq->queue)  || list_empty(&mEp->qh[mEp->dir].queue))
+	    list_empty(&mReq->queue)  || list_empty(&mEp->qh.queue))
 		return -EINVAL;
 
 	spin_lock_irqsave(mEp->lock, flags);
@@ -2244,7 +2252,7 @@
 #ifndef STALL_IN
 	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
 	if (value && mEp->type == USB_ENDPOINT_XFER_BULK && mEp->dir == TX &&
-	    !list_empty(&mEp->qh[mEp->dir].queue)) {
+	    !list_empty(&mEp->qh.queue)) {
 		spin_unlock_irqrestore(mEp->lock, flags);
 		return -EAGAIN;
 	}
@@ -2355,7 +2363,7 @@
 		if (is_active) {
 			pm_runtime_get_sync(&_gadget->dev);
 			hw_device_reset(udc);
-			hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
+			hw_device_state(udc->ep0out.qh.dma);
 		} else {
 			hw_device_state(0);
 			if (udc->udc_driver->notify_event)
@@ -2390,7 +2398,8 @@
 		int (*bind)(struct usb_gadget *))
 {
 	struct ci13xxx *udc = _udc;
-	unsigned long i, k, flags;
+	unsigned long flags;
+	int i, j;
 	int retval = -ENOMEM;
 
 	trace("%p", driver);
@@ -2427,45 +2436,46 @@
 
 	info("hw_ep_max = %d", hw_ep_max);
 
-	udc->driver = driver;
 	udc->gadget.dev.driver = NULL;
 
 	retval = 0;
-	for (i = 0; i < hw_ep_max; i++) {
-		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
+	for (i = 0; i < hw_ep_max/2; i++) {
+		for (j = RX; j <= TX; j++) {
+			int k = i + j * hw_ep_max/2;
+			struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[k];
 
-		scnprintf(mEp->name, sizeof(mEp->name), "ep%i", (int)i);
+			scnprintf(mEp->name, sizeof(mEp->name), "ep%i%s", i,
+					(j == TX)  ? "in" : "out");
 
-		mEp->lock         = udc->lock;
-		mEp->device       = &udc->gadget.dev;
-		mEp->td_pool      = udc->td_pool;
+			mEp->lock         = udc->lock;
+			mEp->device       = &udc->gadget.dev;
+			mEp->td_pool      = udc->td_pool;
 
-		mEp->ep.name      = mEp->name;
-		mEp->ep.ops       = &usb_ep_ops;
-		mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
+			mEp->ep.name      = mEp->name;
+			mEp->ep.ops       = &usb_ep_ops;
+			mEp->ep.maxpacket = CTRL_PAYLOAD_MAX;
 
-		/* this allocation cannot be random */
-		for (k = RX; k <= TX; k++) {
-			INIT_LIST_HEAD(&mEp->qh[k].queue);
+			INIT_LIST_HEAD(&mEp->qh.queue);
 			spin_unlock_irqrestore(udc->lock, flags);
-			mEp->qh[k].ptr = dma_pool_alloc(udc->qh_pool,
-							GFP_KERNEL,
-							&mEp->qh[k].dma);
+			mEp->qh.ptr = dma_pool_alloc(udc->qh_pool, GFP_KERNEL,
+					&mEp->qh.dma);
 			spin_lock_irqsave(udc->lock, flags);
-			if (mEp->qh[k].ptr == NULL)
+			if (mEp->qh.ptr == NULL)
 				retval = -ENOMEM;
 			else
-				memset(mEp->qh[k].ptr, 0,
-				       sizeof(*mEp->qh[k].ptr));
-		}
-		if (i == 0)
-			udc->gadget.ep0 = &mEp->ep;
-		else
+				memset(mEp->qh.ptr, 0, sizeof(*mEp->qh.ptr));
+
+			/* skip ep0 out and in endpoints */
+			if (i == 0)
+				continue;
+
 			list_add_tail(&mEp->ep.ep_list, &udc->gadget.ep_list);
+		}
 	}
 	if (retval)
 		goto done;
 
+	udc->gadget.ep0 = &udc->ep0in.ep;
 	/* bind gadget */
 	driver->driver.bus     = NULL;
 	udc->gadget.dev.driver = &driver->driver;
@@ -2479,6 +2489,7 @@
 		goto done;
 	}
 
+	udc->driver = driver;
 	pm_runtime_get_sync(&udc->gadget.dev);
 	if (udc->udc_driver->flags & CI13XXX_PULLUP_ON_VBUS) {
 		if (udc->vbus_active) {
@@ -2490,14 +2501,12 @@
 		}
 	}
 
-	retval = hw_device_state(udc->ci13xxx_ep[0].qh[RX].dma);
+	retval = hw_device_state(udc->ep0out.qh.dma);
 	if (retval)
 		pm_runtime_put_sync(&udc->gadget.dev);
 
  done:
 	spin_unlock_irqrestore(udc->lock, flags);
-	if (retval)
-		usb_gadget_unregister_driver(driver);
 	return retval;
 }
 EXPORT_SYMBOL(usb_gadget_probe_driver);
@@ -2510,7 +2519,7 @@
 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
 {
 	struct ci13xxx *udc = _udc;
-	unsigned long i, k, flags;
+	unsigned long i, flags;
 
 	trace("%p", driver);
 
@@ -2546,17 +2555,14 @@
 	for (i = 0; i < hw_ep_max; i++) {
 		struct ci13xxx_ep *mEp = &udc->ci13xxx_ep[i];
 
-		if (i == 0)
-			udc->gadget.ep0 = NULL;
-		else if (!list_empty(&mEp->ep.ep_list))
+		if (!list_empty(&mEp->ep.ep_list))
 			list_del_init(&mEp->ep.ep_list);
 
-		for (k = RX; k <= TX; k++)
-			if (mEp->qh[k].ptr != NULL)
-				dma_pool_free(udc->qh_pool,
-					      mEp->qh[k].ptr, mEp->qh[k].dma);
+		if (mEp->qh.ptr != NULL)
+			dma_pool_free(udc->qh_pool, mEp->qh.ptr, mEp->qh.dma);
 	}
 
+	udc->gadget.ep0 = NULL;
 	udc->driver = NULL;
 
 	spin_unlock_irqrestore(udc->lock, flags);
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index f61fed0..a2492b6 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -20,7 +20,7 @@
  * DEFINE
  *****************************************************************************/
 #define CI13XXX_PAGE_SIZE  4096ul /* page size for TD's */
-#define ENDPT_MAX          (16)
+#define ENDPT_MAX          (32)
 #define CTRL_PAYLOAD_MAX   (64)
 #define RX        (0)  /* similar to USB_DIR_OUT but can be used as an index */
 #define TX        (1)  /* similar to USB_DIR_IN  but can be used as an index */
@@ -88,8 +88,7 @@
 		struct list_head   queue;
 		struct ci13xxx_qh *ptr;
 		dma_addr_t         dma;
-	}                                      qh[2];
-	struct usb_request                    *status;
+	}                                      qh;
 	int                                    wedge;
 
 	/* global resources */
@@ -119,9 +118,13 @@
 
 	struct dma_pool           *qh_pool;   /* DMA pool for queue heads */
 	struct dma_pool           *td_pool;   /* DMA pool for transfer descs */
+	struct usb_request        *status;    /* ep0 status request */
 
 	struct usb_gadget          gadget;     /* USB slave device */
 	struct ci13xxx_ep          ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
+	u32                        ep0_dir;    /* ep0 direction */
+#define ep0out ci13xxx_ep[0]
+#define ep0in  ci13xxx_ep[16]
 
 	struct usb_gadget_driver  *driver;     /* 3rd party gadget driver */
 	struct ci13xxx_udc_driver *udc_driver; /* device controller driver */
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index f6ff845..1ba4bef 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -928,8 +928,9 @@
 		 */
 		switch (ctrl->bRequestType & USB_RECIP_MASK) {
 		case USB_RECIP_INTERFACE:
-			if (cdev->config)
-				f = cdev->config->interface[intf];
+			if (!cdev->config || w_index >= MAX_CONFIG_INTERFACES)
+				break;
+			f = cdev->config->interface[intf];
 			break;
 
 		case USB_RECIP_ENDPOINT:
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index b5dbb23..6d8e533 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -293,6 +293,7 @@
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
+#include <linux/usb/composite.h>
 
 #include "gadget_chips.h"
 
@@ -2763,7 +2764,7 @@
 			return ERR_PTR(-ENOMEM);
 		common->free_storage_on_release = 1;
 	} else {
-		memset(common, 0, sizeof common);
+		memset(common, 0, sizeof *common);
 		common->free_storage_on_release = 0;
 	}
 
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 0c8dd81..b120dbb 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -198,10 +198,10 @@
 #define PCH_UDC_BRLEN		0x0F	/* Burst length */
 #define PCH_UDC_THLEN		0x1F	/* Threshold length */
 /* Value of EP Buffer Size */
-#define UDC_EP0IN_BUFF_SIZE	64
-#define UDC_EPIN_BUFF_SIZE	512
-#define UDC_EP0OUT_BUFF_SIZE	64
-#define UDC_EPOUT_BUFF_SIZE	512
+#define UDC_EP0IN_BUFF_SIZE	16
+#define UDC_EPIN_BUFF_SIZE	256
+#define UDC_EP0OUT_BUFF_SIZE	16
+#define UDC_EPOUT_BUFF_SIZE	256
 /* Value of EP maximum packet size */
 #define UDC_EP0IN_MAX_PKT_SIZE	64
 #define UDC_EP0OUT_MAX_PKT_SIZE	64
@@ -351,7 +351,7 @@
 	struct pci_pool		*data_requests;
 	struct pci_pool		*stp_requests;
 	dma_addr_t			dma_addr;
-	unsigned long			ep0out_buf[64];
+	void				*ep0out_buf;
 	struct usb_ctrlrequest		setup_data;
 	unsigned long			phys_addr;
 	void __iomem			*base_addr;
@@ -361,6 +361,8 @@
 
 #define PCH_UDC_PCI_BAR			1
 #define PCI_DEVICE_ID_INTEL_EG20T_UDC	0x8808
+#define PCI_VENDOR_ID_ROHM		0x10DB
+#define PCI_DEVICE_ID_ML7213_IOH_UDC	0x801D
 
 static const char	ep0_string[] = "ep0in";
 static DEFINE_SPINLOCK(udc_stall_spinlock);	/* stall spin lock */
@@ -1219,11 +1221,11 @@
 	dev = ep->dev;
 	if (req->dma_mapped) {
 		if (ep->in)
-			pci_unmap_single(dev->pdev, req->req.dma,
-					 req->req.length, PCI_DMA_TODEVICE);
+			dma_unmap_single(&dev->pdev->dev, req->req.dma,
+					 req->req.length, DMA_TO_DEVICE);
 		else
-			pci_unmap_single(dev->pdev, req->req.dma,
-					 req->req.length, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&dev->pdev->dev, req->req.dma,
+					 req->req.length, DMA_FROM_DEVICE);
 		req->dma_mapped = 0;
 		req->req.dma = DMA_ADDR_INVALID;
 	}
@@ -1414,7 +1416,6 @@
 
 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
 	td_data = req->td_data;
-	ep->td_data = req->td_data;
 	/* Set the status bits for all descriptors */
 	while (1) {
 		td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
@@ -1613,15 +1614,19 @@
 	if (usbreq->length &&
 	    ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
 		if (ep->in)
-			usbreq->dma = pci_map_single(dev->pdev, usbreq->buf,
-					usbreq->length, PCI_DMA_TODEVICE);
+			usbreq->dma = dma_map_single(&dev->pdev->dev,
+						     usbreq->buf,
+						     usbreq->length,
+						     DMA_TO_DEVICE);
 		else
-			usbreq->dma = pci_map_single(dev->pdev, usbreq->buf,
-					usbreq->length, PCI_DMA_FROMDEVICE);
+			usbreq->dma = dma_map_single(&dev->pdev->dev,
+						     usbreq->buf,
+						     usbreq->length,
+						     DMA_FROM_DEVICE);
 		req->dma_mapped = 1;
 	}
 	if (usbreq->length > 0) {
-		retval = prepare_dma(ep, req, gfp);
+		retval = prepare_dma(ep, req, GFP_ATOMIC);
 		if (retval)
 			goto probe_end;
 	}
@@ -1646,7 +1651,6 @@
 			pch_udc_wait_ep_stall(ep);
 			pch_udc_ep_clear_nak(ep);
 			pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
-			pch_udc_set_dma(dev, DMA_DIR_TX);
 		}
 	}
 	/* Now add this request to the ep's pending requests */
@@ -1926,6 +1930,7 @@
 	    PCH_UDC_BS_DMA_DONE)
 		return;
 	pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
+	pch_udc_ep_set_ddptr(ep, 0);
 	if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
 	    PCH_UDC_RTS_SUCC) {
 		dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
@@ -1963,7 +1968,7 @@
 	u32	epsts;
 	struct pch_udc_ep	*ep;
 
-	ep = &dev->ep[2*ep_num];
+	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
 	epsts = ep->epsts;
 	ep->epsts = 0;
 
@@ -2008,7 +2013,7 @@
 	struct pch_udc_ep		*ep;
 	struct pch_udc_request		*req = NULL;
 
-	ep = &dev->ep[2*ep_num + 1];
+	ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
 	epsts = ep->epsts;
 	ep->epsts = 0;
 
@@ -2025,10 +2030,11 @@
 	}
 	if (epsts & UDC_EPSTS_HE)
 		return;
-	if (epsts & UDC_EPSTS_RSS)
+	if (epsts & UDC_EPSTS_RSS) {
 		pch_udc_ep_set_stall(ep);
 		pch_udc_enable_ep_interrupts(ep->dev,
 					     PCH_UDC_EPINT(ep->in, ep->num));
+	}
 	if (epsts & UDC_EPSTS_RCS) {
 		if (!dev->prot_stall) {
 			pch_udc_ep_clear_stall(ep);
@@ -2060,8 +2066,10 @@
 {
 	u32	epsts;
 	struct pch_udc_ep	*ep;
+	struct pch_udc_ep	*ep_out;
 
 	ep = &dev->ep[UDC_EP0IN_IDX];
+	ep_out = &dev->ep[UDC_EP0OUT_IDX];
 	epsts = ep->epsts;
 	ep->epsts = 0;
 
@@ -2073,8 +2081,16 @@
 		return;
 	if (epsts & UDC_EPSTS_HE)
 		return;
-	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall))
+	if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
 		pch_udc_complete_transfer(ep);
+		pch_udc_clear_dma(dev, DMA_DIR_RX);
+		ep_out->td_data->status = (ep_out->td_data->status &
+					~PCH_UDC_BUFF_STS) |
+					PCH_UDC_BS_HST_RDY;
+		pch_udc_ep_clear_nak(ep_out);
+		pch_udc_set_dma(dev, DMA_DIR_RX);
+		pch_udc_ep_set_rrdy(ep_out);
+	}
 	/* On IN interrupt, provide data if we have any */
 	if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
 	     !(epsts & UDC_EPSTS_TXEMPTY))
@@ -2102,11 +2118,9 @@
 		dev->stall = 0;
 		dev->ep[UDC_EP0IN_IDX].halted = 0;
 		dev->ep[UDC_EP0OUT_IDX].halted = 0;
-		/* In data not ready */
-		pch_udc_ep_set_nak(&(dev->ep[UDC_EP0IN_IDX]));
 		dev->setup_data = ep->td_stp->request;
 		pch_udc_init_setup_buff(ep->td_stp);
-		pch_udc_clear_dma(dev, DMA_DIR_TX);
+		pch_udc_clear_dma(dev, DMA_DIR_RX);
 		pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
 				      dev->ep[UDC_EP0IN_IDX].in);
 		if ((dev->setup_data.bRequestType & USB_DIR_IN))
@@ -2122,14 +2136,23 @@
 		setup_supported = dev->driver->setup(&dev->gadget,
 						     &dev->setup_data);
 		spin_lock(&dev->lock);
+
+		if (dev->setup_data.bRequestType & USB_DIR_IN) {
+			ep->td_data->status = (ep->td_data->status &
+						~PCH_UDC_BUFF_STS) |
+						PCH_UDC_BS_HST_RDY;
+			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
+		}
 		/* ep0 in returns data on IN phase */
 		if (setup_supported >= 0 && setup_supported <
 					    UDC_EP0IN_MAX_PKT_SIZE) {
 			pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
 			/* Gadget would have queued a request when
 			 * we called the setup */
-			pch_udc_set_dma(dev, DMA_DIR_RX);
-			pch_udc_ep_clear_nak(ep);
+			if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
+				pch_udc_set_dma(dev, DMA_DIR_RX);
+				pch_udc_ep_clear_nak(ep);
+			}
 		} else if (setup_supported < 0) {
 			/* if unsupported request, then stall */
 			pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
@@ -2142,22 +2165,13 @@
 		}
 	} else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
 		     UDC_EPSTS_OUT_DATA) && !dev->stall) {
-		if (list_empty(&ep->queue)) {
-			dev_err(&dev->pdev->dev, "%s: No request\n", __func__);
-			ep->td_data->status = (ep->td_data->status &
-					       ~PCH_UDC_BUFF_STS) |
-					       PCH_UDC_BS_HST_RDY;
-			pch_udc_set_dma(dev, DMA_DIR_RX);
-		} else {
-			/* control write */
-			/* next function will pickuo an clear the status */
+		pch_udc_clear_dma(dev, DMA_DIR_RX);
+		pch_udc_ep_set_ddptr(ep, 0);
+		if (!list_empty(&ep->queue)) {
 			ep->epsts = stat;
-
-			pch_udc_svc_data_out(dev, 0);
-			/* re-program desc. pointer for possible ZLPs */
-			pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
-			pch_udc_set_dma(dev, DMA_DIR_RX);
+			pch_udc_svc_data_out(dev, PCH_UDC_EP0);
 		}
+		pch_udc_set_dma(dev, DMA_DIR_RX);
 	}
 	pch_udc_ep_set_rrdy(ep);
 }
@@ -2174,7 +2188,7 @@
 	struct pch_udc_ep	*ep;
 	struct pch_udc_request *req;
 
-	ep = &dev->ep[2*ep_num];
+	ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
 	if (!list_empty(&ep->queue)) {
 		req = list_entry(ep->queue.next, struct pch_udc_request, queue);
 		pch_udc_enable_ep_interrupts(ep->dev,
@@ -2196,13 +2210,13 @@
 	for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
 		/* IN */
 		if (ep_intr & (0x1 << i)) {
-			ep = &dev->ep[2*i];
+			ep = &dev->ep[UDC_EPIN_IDX(i)];
 			ep->epsts = pch_udc_read_ep_status(ep);
 			pch_udc_clear_ep_status(ep, ep->epsts);
 		}
 		/* OUT */
 		if (ep_intr & (0x10000 << i)) {
-			ep = &dev->ep[2*i+1];
+			ep = &dev->ep[UDC_EPOUT_IDX(i)];
 			ep->epsts = pch_udc_read_ep_status(ep);
 			pch_udc_clear_ep_status(ep, ep->epsts);
 		}
@@ -2563,9 +2577,6 @@
 	dev->ep[UDC_EP0IN_IDX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
 	dev->ep[UDC_EP0OUT_IDX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
 
-	dev->dma_addr = pci_map_single(dev->pdev, dev->ep0out_buf, 256,
-				  PCI_DMA_FROMDEVICE);
-
 	/* remove ep0 in and out from the list.  They have own pointer */
 	list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
 	list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
@@ -2637,6 +2648,13 @@
 	dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
 	dev->ep[UDC_EP0IN_IDX].td_data = NULL;
 	dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
+
+	dev->ep0out_buf = kzalloc(UDC_EP0OUT_BUFF_SIZE * 4, GFP_KERNEL);
+	if (!dev->ep0out_buf)
+		return -ENOMEM;
+	dev->dma_addr = dma_map_single(&dev->pdev->dev, dev->ep0out_buf,
+				       UDC_EP0OUT_BUFF_SIZE * 4,
+				       DMA_FROM_DEVICE);
 	return 0;
 }
 
@@ -2700,7 +2718,8 @@
 
 	pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
 
-	/* Assues that there are no pending requets with this driver */
+	/* Ensures that there are no pending requests with this driver */
+	driver->disconnect(&dev->gadget);
 	driver->unbind(&dev->gadget);
 	dev->gadget.dev.driver = NULL;
 	dev->driver = NULL;
@@ -2750,6 +2769,11 @@
 		pci_pool_destroy(dev->stp_requests);
 	}
 
+	if (dev->dma_addr)
+		dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
+				 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
+	kfree(dev->ep0out_buf);
+
 	pch_udc_exit(dev);
 
 	if (dev->irq_registered)
@@ -2792,11 +2816,7 @@
 	int ret;
 
 	pci_set_power_state(pdev, PCI_D0);
-	ret = pci_restore_state(pdev);
-	if (ret) {
-		dev_err(&pdev->dev, "%s: pci_restore_state failed\n", __func__);
-		return ret;
-	}
+	pci_restore_state(pdev);
 	ret = pci_enable_device(pdev);
 	if (ret) {
 		dev_err(&pdev->dev, "%s: pci_enable_device failed\n", __func__);
@@ -2914,6 +2934,11 @@
 		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
 		.class_mask = 0xffffffff,
 	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
+		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
+		.class_mask = 0xffffffff,
+	},
 	{ 0 },
 };
 
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 2fc8636..12ff6cf 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -131,31 +131,31 @@
  * parameters are in UTF-8 (superset of ASCII's 7 bit characters).
  */
 
-static ushort __initdata idVendor;
+static ushort idVendor;
 module_param(idVendor, ushort, S_IRUGO);
 MODULE_PARM_DESC(idVendor, "USB Vendor ID");
 
-static ushort __initdata idProduct;
+static ushort idProduct;
 module_param(idProduct, ushort, S_IRUGO);
 MODULE_PARM_DESC(idProduct, "USB Product ID");
 
-static ushort __initdata bcdDevice;
+static ushort bcdDevice;
 module_param(bcdDevice, ushort, S_IRUGO);
 MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
 
-static char *__initdata iManufacturer;
+static char *iManufacturer;
 module_param(iManufacturer, charp, S_IRUGO);
 MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
 
-static char *__initdata iProduct;
+static char *iProduct;
 module_param(iProduct, charp, S_IRUGO);
 MODULE_PARM_DESC(iProduct, "USB Product string");
 
-static char *__initdata iSerialNum;
+static char *iSerialNum;
 module_param(iSerialNum, charp, S_IRUGO);
 MODULE_PARM_DESC(iSerialNum, "1");
 
-static char *__initdata iPNPstring;
+static char *iPNPstring;
 module_param(iPNPstring, charp, S_IRUGO);
 MODULE_PARM_DESC(iPNPstring, "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;");
 
@@ -1596,13 +1596,12 @@
 	int status;
 
 	mutex_lock(&usb_printer_gadget.lock_printer_io);
-	class_destroy(usb_gadget_class);
-	unregister_chrdev_region(g_printer_devno, 2);
-
 	status = usb_gadget_unregister_driver(&printer_driver);
 	if (status)
 		ERROR(dev, "usb_gadget_unregister_driver %x\n", status);
 
+	unregister_chrdev_region(g_printer_devno, 2);
+	class_destroy(usb_gadget_class);
 	mutex_unlock(&usb_printer_gadget.lock_printer_io);
 }
 module_exit(cleanup);
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 20d43da..0151185 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -258,7 +258,7 @@
 		break;
 	case R8A66597_BULK:
 		/* isochronous pipes may be used as bulk pipes */
-		if (info->pipe > R8A66597_BASE_PIPENUM_BULK)
+		if (info->pipe >= R8A66597_BASE_PIPENUM_BULK)
 			bufnum = info->pipe - R8A66597_BASE_PIPENUM_BULK;
 		else
 			bufnum = info->pipe - R8A66597_BASE_PIPENUM_ISOC;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 24046c0..0e6afa2 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -151,6 +151,8 @@
 	  Qualcomm chipsets. Root Hub has inbuilt TT.
 	  This driver depends on OTG driver for PHY initialization,
 	  clock management, powering up VBUS, and power management.
+	  This driver is not supported on boards like trout which
+	  have an external PHY.
 
 config USB_EHCI_HCD_PPC_OF
 	bool "EHCI support for PPC USB controller on OF platform bus"
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index 2baf8a8..a869e3c 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -227,8 +227,8 @@
 	 * mark HW unaccessible.  The PM and USB cores make sure that
 	 * the root hub is either suspended or stopped.
 	 */
-	spin_lock_irqsave(&ehci->lock, flags);
 	ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev));
+	spin_lock_irqsave(&ehci->lock, flags);
 	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
 	(void)ehci_readl(ehci, &ehci->regs->intr_enable);
 
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 86e4289..5c761df 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -52,7 +52,6 @@
 	struct resource *res;
 	int irq;
 	int retval;
-	unsigned int temp;
 
 	pr_debug("initializing FSL-SOC USB Controller\n");
 
@@ -126,18 +125,6 @@
 		goto err3;
 	}
 
-	/*
-	 * Check if it is MPC5121 SoC, otherwise set pdata->have_sysif_regs
-	 * flag for 83xx or 8536 system interface registers.
-	 */
-	if (pdata->big_endian_mmio)
-		temp = in_be32(hcd->regs + FSL_SOC_USB_ID);
-	else
-		temp = in_le32(hcd->regs + FSL_SOC_USB_ID);
-
-	if ((temp & ID_MSK) != (~((temp & NID_MSK) >> 8) & ID_MSK))
-		pdata->have_sysif_regs = 1;
-
 	/* Enable USB controller, 83xx or 8536 */
 	if (pdata->have_sysif_regs)
 		setbits32(hcd->regs + FSL_SOC_USB_CTRL, 0x4);
diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h
index 2c83537..3fabed3 100644
--- a/drivers/usb/host/ehci-fsl.h
+++ b/drivers/usb/host/ehci-fsl.h
@@ -19,9 +19,6 @@
 #define _EHCI_FSL_H
 
 /* offsets for the non-ehci registers in the FSL SOC USB controller */
-#define FSL_SOC_USB_ID		0x0
-#define ID_MSK			0x3f
-#define NID_MSK			0x3f00
 #define FSL_SOC_USB_ULPIVP	0x170
 #define FSL_SOC_USB_PORTSC1	0x184
 #define PORT_PTS_MSK		(3<<30)
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 6fee3cd..74dcf49 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -572,6 +572,8 @@
 	ehci->iaa_watchdog.function = ehci_iaa_watchdog;
 	ehci->iaa_watchdog.data = (unsigned long) ehci;
 
+	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
+
 	/*
 	 * hw default: 1K periodic list heads, one per frame.
 	 * periodic_size can shrink by USBCMD update if hcc_params allows.
@@ -579,11 +581,20 @@
 	ehci->periodic_size = DEFAULT_I_TDPS;
 	INIT_LIST_HEAD(&ehci->cached_itd_list);
 	INIT_LIST_HEAD(&ehci->cached_sitd_list);
+
+	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+		/* periodic schedule size can be smaller than default */
+		switch (EHCI_TUNE_FLS) {
+		case 0: ehci->periodic_size = 1024; break;
+		case 1: ehci->periodic_size = 512; break;
+		case 2: ehci->periodic_size = 256; break;
+		default:	BUG();
+		}
+	}
 	if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0)
 		return retval;
 
 	/* controllers may cache some of the periodic schedule ... */
-	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
 	if (HCC_ISOC_CACHE(hcc_params))		// full frame cache
 		ehci->i_thresh = 2 + 8;
 	else					// N microframes cached
@@ -637,12 +648,6 @@
 		/* periodic schedule size can be smaller than default */
 		temp &= ~(3 << 2);
 		temp |= (EHCI_TUNE_FLS << 2);
-		switch (EHCI_TUNE_FLS) {
-		case 0: ehci->periodic_size = 1024; break;
-		case 1: ehci->periodic_size = 512; break;
-		case 2: ehci->periodic_size = 256; break;
-		default:	BUG();
-		}
 	}
 	if (HCC_LPM(hcc_params)) {
 		/* support link power management EHCI 1.1 addendum */
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 796ea0c..8a515f0 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -111,6 +111,7 @@
 {
 	int		port;
 	u32		temp;
+	unsigned long	flags;
 
 	/* If remote wakeup is enabled for the root hub but disabled
 	 * for the controller, we must adjust all the port wakeup flags
@@ -120,6 +121,8 @@
 	if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
 		return;
 
+	spin_lock_irqsave(&ehci->lock, flags);
+
 	/* clear phy low-power mode before changing wakeup flags */
 	if (ehci->has_hostpc) {
 		port = HCS_N_PORTS(ehci->hcs_params);
@@ -131,7 +134,9 @@
 			temp = ehci_readl(ehci, hostpc_reg);
 			ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
 		}
+		spin_unlock_irqrestore(&ehci->lock, flags);
 		msleep(5);
+		spin_lock_irqsave(&ehci->lock, flags);
 	}
 
 	port = HCS_N_PORTS(ehci->hcs_params);
@@ -170,6 +175,8 @@
 	/* Does the root hub have a port wakeup pending? */
 	if (!suspending && (ehci_readl(ehci, &ehci->regs->status) & STS_PCD))
 		usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
+
+	spin_unlock_irqrestore(&ehci->lock, flags);
 }
 
 static int ehci_bus_suspend (struct usb_hcd *hcd)
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index fa59b26..c8e360d 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -21,10 +21,13 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/usb/otg.h>
+#include <linux/usb/ulpi.h>
 #include <linux/slab.h>
 
 #include <mach/mxc_ehci.h>
 
+#include <asm/mach-types.h>
+
 #define ULPI_VIEWPORT_OFFSET	0x170
 
 struct ehci_mxc_priv {
@@ -114,6 +117,7 @@
 	struct usb_hcd *hcd;
 	struct resource *res;
 	int irq, ret;
+	unsigned int flags;
 	struct ehci_mxc_priv *priv;
 	struct device *dev = &pdev->dev;
 	struct ehci_hcd *ehci;
@@ -177,8 +181,8 @@
 		clk_enable(priv->ahbclk);
 	}
 
-	/* "dr" device has its own clock */
-	if (pdev->id == 0) {
+	/* "dr" device has its own clock on i.MX51 */
+	if (cpu_is_mx51() && (pdev->id == 0)) {
 		priv->phy1clk = clk_get(dev, "usb_phy1");
 		if (IS_ERR(priv->phy1clk)) {
 			ret = PTR_ERR(priv->phy1clk);
@@ -240,6 +244,23 @@
 	if (ret)
 		goto err_add;
 
+	if (pdata->otg) {
+		/*
+		 * efikamx and efikasb have some hardware bug which is
+		 * preventing USB from working unless CHRGVBUS is set.
+		 * It's in violation of USB specs
+		 */
+		if (machine_is_mx51_efikamx() || machine_is_mx51_efikasb()) {
+			flags = otg_io_read(pdata->otg, ULPI_OTG_CTRL);
+			flags |= ULPI_OTG_CTRL_CHRGVBUS;
+			ret = otg_io_write(pdata->otg, flags, ULPI_OTG_CTRL);
+			if (ret) {
+				dev_err(dev, "unable to set CHRVBUS\n");
+				goto err_add;
+			}
+		}
+	}
+
 	return 0;
 
 err_add:
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 680f2ef..f784ceb 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -796,7 +796,7 @@
 	hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev,
 			dev_name(&pdev->dev));
 	if (!hcd) {
-		dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret);
+		dev_err(&pdev->dev, "failed to create hcd with err %d\n", ret);
 		ret = -ENOMEM;
 		goto err_create_hcd;
 	}
@@ -864,7 +864,7 @@
 
 	ret = omap_start_ehc(omap, hcd);
 	if (ret) {
-		dev_dbg(&pdev->dev, "failed to start ehci\n");
+		dev_err(&pdev->dev, "failed to start ehci with err %d\n", ret);
 		goto err_start;
 	}
 
@@ -879,7 +879,7 @@
 
 	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
 	if (ret) {
-		dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
+		dev_err(&pdev->dev, "failed to add hcd with err %d\n", ret);
 		goto err_add_hcd;
 	}
 
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 76179c3..07bb982 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -44,28 +44,35 @@
 	return 0;
 }
 
-static int ehci_quirk_amd_SB800(struct ehci_hcd *ehci)
+static int ehci_quirk_amd_hudson(struct ehci_hcd *ehci)
 {
 	struct pci_dev *amd_smbus_dev;
 	u8 rev = 0;
 
 	amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL);
-	if (!amd_smbus_dev)
-		return 0;
-
-	pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
-	if (rev < 0x40) {
-		pci_dev_put(amd_smbus_dev);
-		amd_smbus_dev = NULL;
-		return 0;
+	if (amd_smbus_dev) {
+		pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+		if (rev < 0x40) {
+			pci_dev_put(amd_smbus_dev);
+			amd_smbus_dev = NULL;
+			return 0;
+		}
+	} else {
+		amd_smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x780b, NULL);
+		if (!amd_smbus_dev)
+			return 0;
+		pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+		if (rev < 0x11 || rev > 0x18) {
+			pci_dev_put(amd_smbus_dev);
+			amd_smbus_dev = NULL;
+			return 0;
+		}
 	}
 
 	if (!amd_nb_dev)
 		amd_nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL);
-	if (!amd_nb_dev)
-		ehci_err(ehci, "QUIRK: unable to get AMD NB device\n");
 
-	ehci_info(ehci, "QUIRK: Enable AMD SB800 L1 fix\n");
+	ehci_info(ehci, "QUIRK: Enable exception for AMD Hudson ASPM\n");
 
 	pci_dev_put(amd_smbus_dev);
 	amd_smbus_dev = NULL;
@@ -131,7 +138,7 @@
 	/* cache this readonly data; minimize chip reads */
 	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
 
-	if (ehci_quirk_amd_SB800(ehci))
+	if (ehci_quirk_amd_hudson(ehci))
 		ehci->amd_l1_fix = 1;
 
 	retval = ehci_halt(ehci);
@@ -360,8 +367,8 @@
 	 * mark HW unaccessible.  The PM and USB cores make sure that
 	 * the root hub is either suspended or stopped.
 	 */
-	spin_lock_irqsave (&ehci->lock, flags);
 	ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup);
+	spin_lock_irqsave (&ehci->lock, flags);
 	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
 	(void)ehci_readl(ehci, &ehci->regs->intr_enable);
 
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 574b99e..79a66d6 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -262,19 +262,24 @@
 	}
 }
 
-struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
+static struct fsl_usb2_platform_data fsl_usb2_mpc5121_pd = {
 	.big_endian_desc = 1,
 	.big_endian_mmio = 1,
 	.es = 1,
+	.have_sysif_regs = 0,
 	.le_setup_buf = 1,
 	.init = fsl_usb2_mpc5121_init,
 	.exit = fsl_usb2_mpc5121_exit,
 };
 #endif /* CONFIG_PPC_MPC512x */
 
+static struct fsl_usb2_platform_data fsl_usb2_mpc8xxx_pd = {
+	.have_sysif_regs = 1,
+};
+
 static const struct of_device_id fsl_usb2_mph_dr_of_match[] = {
-	{ .compatible = "fsl-usb2-mph", },
-	{ .compatible = "fsl-usb2-dr", },
+	{ .compatible = "fsl-usb2-mph", .data = &fsl_usb2_mpc8xxx_pd, },
+	{ .compatible = "fsl-usb2-dr", .data = &fsl_usb2_mpc8xxx_pd, },
 #ifdef CONFIG_PPC_MPC512x
 	{ .compatible = "fsl,mpc5121-usb2-dr", .data = &fsl_usb2_mpc5121_pd, },
 #endif
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 990f06b..2e9602a 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -861,6 +861,7 @@
 			DBG("dev %d ep%d maxpacket %d\n",
 				udev->devnum, epnum, ep->maxpacket);
 			retval = -EINVAL;
+			kfree(ep);
 			goto fail;
 		}
 
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index df558f6..3e8211c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -308,11 +308,8 @@
 /* Ring the host controller doorbell after placing a command on the ring */
 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 {
-	u32 temp;
-
 	xhci_dbg(xhci, "// Ding dong!\n");
-	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
-	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
+	xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
 	/* Flush PCI posted writes */
 	xhci_readl(xhci, &xhci->dba->doorbell[0]);
 }
@@ -322,26 +319,24 @@
 		unsigned int ep_index,
 		unsigned int stream_id)
 {
-	struct xhci_virt_ep *ep;
-	unsigned int ep_state;
-	u32 field;
 	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
+	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+	unsigned int ep_state = ep->ep_state;
 
-	ep = &xhci->devs[slot_id]->eps[ep_index];
-	ep_state = ep->ep_state;
 	/* Don't ring the doorbell for this endpoint if there are pending
-	 * cancellations because the we don't want to interrupt processing.
+	 * cancellations because we don't want to interrupt processing.
 	 * We don't want to restart any stream rings if there's a set dequeue
 	 * pointer command pending because the device can choose to start any
 	 * stream once the endpoint is on the HW schedule.
 	 * FIXME - check all the stream rings for pending cancellations.
 	 */
-	if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
-			&& !(ep_state & EP_HALTED)) {
-		field = xhci_readl(xhci, db_addr) & DB_MASK;
-		field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
-		xhci_writel(xhci, field, db_addr);
-	}
+	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
+	    (ep_state & EP_HALTED))
+		return;
+	xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
+	/* The CPU has better things to do at this point than wait for a
+	 * write-posting flush.  It'll get there soon enough.
+	 */
 }
 
 /* Ring the doorbell for any rings with pending URBs */
@@ -1188,7 +1183,7 @@
 
 	addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
 	temp = xhci_readl(xhci, addr);
-	if ((temp & PORT_CONNECT) && (hcd->state == HC_STATE_SUSPENDED)) {
+	if (hcd->state == HC_STATE_SUSPENDED) {
 		xhci_dbg(xhci, "resume root hub\n");
 		usb_hcd_resume_root_hub(hcd);
 	}
@@ -1710,8 +1705,7 @@
 		/* Others already handled above */
 		break;
 	}
-	dev_dbg(&td->urb->dev->dev,
-			"ep %#x - asked for %d bytes, "
+	xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
 			"%d bytes untransferred\n",
 			td->urb->ep->desc.bEndpointAddress,
 			td->urb->transfer_buffer_length,
@@ -2389,7 +2383,8 @@
 	}
 	xhci_dbg(xhci, "\n");
 	if (!in_interrupt())
-		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
+		xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
+				"num_trbs = %d\n",
 				urb->ep->desc.bEndpointAddress,
 				urb->transfer_buffer_length,
 				num_trbs);
@@ -2414,14 +2409,17 @@
 
 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
 		unsigned int ep_index, unsigned int stream_id, int start_cycle,
-		struct xhci_generic_trb *start_trb, struct xhci_td *td)
+		struct xhci_generic_trb *start_trb)
 {
 	/*
 	 * Pass all the TRBs to the hardware at once and make sure this write
 	 * isn't reordered.
 	 */
 	wmb();
-	start_trb->field[3] |= start_cycle;
+	if (start_cycle)
+		start_trb->field[3] |= start_cycle;
+	else
+		start_trb->field[3] &= ~0x1;
 	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
 }
 
@@ -2449,7 +2447,7 @@
 	 * to set the polling interval (once the API is added).
 	 */
 	if (xhci_interval != ep_interval) {
-		if (!printk_ratelimit())
+		if (printk_ratelimit())
 			dev_dbg(&urb->dev->dev, "Driver uses different interval"
 					" (%d microframe%s) than xHCI "
 					"(%d microframe%s)\n",
@@ -2551,9 +2549,11 @@
 		u32 remainder = 0;
 
 		/* Don't change the cycle bit of the first TRB until later */
-		if (first_trb)
+		if (first_trb) {
 			first_trb = false;
-		else
+			if (start_cycle == 0)
+				field |= 0x1;
+		} else
 			field |= ep_ring->cycle_state;
 
 		/* Chain all the TRBs together; clear the chain bit in the last
@@ -2625,7 +2625,7 @@
 
 	check_trb_math(urb, num_trbs, running_total);
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
-			start_cycle, start_trb, td);
+			start_cycle, start_trb);
 	return 0;
 }
 
@@ -2671,7 +2671,8 @@
 	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
 
 	if (!in_interrupt())
-		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
+		xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
+				"addr = %#llx, num_trbs = %d\n",
 				urb->ep->desc.bEndpointAddress,
 				urb->transfer_buffer_length,
 				urb->transfer_buffer_length,
@@ -2711,9 +2712,11 @@
 		field = 0;
 
 		/* Don't change the cycle bit of the first TRB until later */
-		if (first_trb)
+		if (first_trb) {
 			first_trb = false;
-		else
+			if (start_cycle == 0)
+				field |= 0x1;
+		} else
 			field |= ep_ring->cycle_state;
 
 		/* Chain all the TRBs together; clear the chain bit in the last
@@ -2757,7 +2760,7 @@
 
 	check_trb_math(urb, num_trbs, running_total);
 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
-			start_cycle, start_trb, td);
+			start_cycle, start_trb);
 	return 0;
 }
 
@@ -2818,13 +2821,17 @@
 	/* Queue setup TRB - see section 6.4.1.2.1 */
 	/* FIXME better way to translate setup_packet into two u32 fields? */
 	setup = (struct usb_ctrlrequest *) urb->setup_packet;
+	field = 0;
+	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
+	if (start_cycle == 0)
+		field |= 0x1;
 	queue_trb(xhci, ep_ring, false, true,
 			/* FIXME endianness is probably going to bite my ass here. */
 			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
 			setup->wIndex | setup->wLength << 16,
 			TRB_LEN(8) | TRB_INTR_TARGET(0),
 			/* Immediate data in pointer */
-			TRB_IDT | TRB_TYPE(TRB_SETUP));
+			field);
 
 	/* If there's data, queue data TRBs */
 	field = 0;
@@ -2859,7 +2866,7 @@
 			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);
 
 	giveback_first_trb(xhci, slot_id, ep_index, 0,
-			start_cycle, start_trb, td);
+			start_cycle, start_trb);
 	return 0;
 }
 
@@ -2900,6 +2907,7 @@
 	int running_total, trb_buff_len, td_len, td_remain_len, ret;
 	u64 start_addr, addr;
 	int i, j;
+	bool more_trbs_coming;
 
 	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
@@ -2910,7 +2918,7 @@
 	}
 
 	if (!in_interrupt())
-		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
+		xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
 				" addr = %#llx, num_tds = %d\n",
 				urb->ep->desc.bEndpointAddress,
 				urb->transfer_buffer_length,
@@ -2950,7 +2958,10 @@
 				field |= TRB_TYPE(TRB_ISOC);
 				/* Assume URB_ISO_ASAP is set */
 				field |= TRB_SIA;
-				if (i > 0)
+				if (i == 0) {
+					if (start_cycle == 0)
+						field |= 0x1;
+				} else
 					field |= ep_ring->cycle_state;
 				first_trb = false;
 			} else {
@@ -2965,9 +2976,11 @@
 			 */
 			if (j < trbs_per_td - 1) {
 				field |= TRB_CHAIN;
+				more_trbs_coming = true;
 			} else {
 				td->last_trb = ep_ring->enqueue;
 				field |= TRB_IOC;
+				more_trbs_coming = false;
 			}
 
 			/* Calculate TRB length */
@@ -2980,7 +2993,7 @@
 			length_field = TRB_LEN(trb_buff_len) |
 				remainder |
 				TRB_INTR_TARGET(0);
-			queue_trb(xhci, ep_ring, false, false,
+			queue_trb(xhci, ep_ring, false, more_trbs_coming,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -3003,10 +3016,8 @@
 		}
 	}
 
-	wmb();
-	start_trb->field[3] |= start_cycle;
-
-	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
+	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+			start_cycle, start_trb);
 	return 0;
 }
 
@@ -3064,7 +3075,7 @@
 	 * to set the polling interval (once the API is added).
 	 */
 	if (xhci_interval != ep_interval) {
-		if (!printk_ratelimit())
+		if (printk_ratelimit())
 			dev_dbg(&urb->dev->dev, "Driver uses different interval"
 					" (%d microframe%s) than xHCI "
 					"(%d microframe%s)\n",
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 45e4a31..34cf4e1 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -226,7 +226,8 @@
 static int xhci_setup_msix(struct xhci_hcd *xhci)
 {
 	int i, ret = 0;
-	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
 	/*
 	 * calculate number of msi-x vectors supported.
@@ -265,6 +266,7 @@
 			goto disable_msix;
 	}
 
+	hcd->msix_enabled = 1;
 	return ret;
 
 disable_msix:
@@ -280,7 +282,8 @@
 /* Free any IRQs and disable MSI-X */
 static void xhci_cleanup_msix(struct xhci_hcd *xhci)
 {
-	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
 
 	xhci_free_irq(xhci);
 
@@ -292,6 +295,7 @@
 		pci_disable_msi(pdev);
 	}
 
+	hcd->msix_enabled = 0;
 	return;
 }
 
@@ -508,9 +512,10 @@
 	spin_lock_irq(&xhci->lock);
 	xhci_halt(xhci);
 	xhci_reset(xhci);
-	xhci_cleanup_msix(xhci);
 	spin_unlock_irq(&xhci->lock);
 
+	xhci_cleanup_msix(xhci);
+
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 	/* Tell the event ring poll function not to reschedule */
 	xhci->zombie = 1;
@@ -544,9 +549,10 @@
 
 	spin_lock_irq(&xhci->lock);
 	xhci_halt(xhci);
-	xhci_cleanup_msix(xhci);
 	spin_unlock_irq(&xhci->lock);
 
+	xhci_cleanup_msix(xhci);
+
 	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
 		    xhci_readl(xhci, &xhci->op_regs->status));
 }
@@ -647,6 +653,7 @@
 	int			rc = 0;
 	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
 	u32			command;
+	int			i;
 
 	spin_lock_irq(&xhci->lock);
 	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
@@ -677,10 +684,15 @@
 		spin_unlock_irq(&xhci->lock);
 		return -ETIMEDOUT;
 	}
-	/* step 5: remove core well power */
-	xhci_cleanup_msix(xhci);
 	spin_unlock_irq(&xhci->lock);
 
+	/* step 5: remove core well power */
+	/* synchronize irq when using MSI-X */
+	if (xhci->msix_entries) {
+		for (i = 0; i < xhci->msix_count; i++)
+			synchronize_irq(xhci->msix_entries[i].vector);
+	}
+
 	return rc;
 }
 
@@ -694,7 +706,6 @@
 {
 	u32			command, temp = 0;
 	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
-	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
 	int	old_state, retval;
 
 	old_state = hcd->state;
@@ -729,9 +740,8 @@
 		xhci_dbg(xhci, "Stop HCD\n");
 		xhci_halt(xhci);
 		xhci_reset(xhci);
-		if (hibernated)
-			xhci_cleanup_msix(xhci);
 		spin_unlock_irq(&xhci->lock);
+		xhci_cleanup_msix(xhci);
 
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 		/* Tell the event ring poll function not to reschedule */
@@ -765,30 +775,6 @@
 		return retval;
 	}
 
-	spin_unlock_irq(&xhci->lock);
-	/* Re-setup MSI-X */
-	if (hcd->irq)
-		free_irq(hcd->irq, hcd);
-	hcd->irq = -1;
-
-	retval = xhci_setup_msix(xhci);
-	if (retval)
-		/* fall back to msi*/
-		retval = xhci_setup_msi(xhci);
-
-	if (retval) {
-		/* fall back to legacy interrupt*/
-		retval = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
-					hcd->irq_descr, hcd);
-		if (retval) {
-			xhci_err(xhci, "request interrupt %d failed\n",
-					pdev->irq);
-			return retval;
-		}
-		hcd->irq = pdev->irq;
-	}
-
-	spin_lock_irq(&xhci->lock);
 	/* step 4: set Run/Stop bit */
 	command = xhci_readl(xhci, &xhci->op_regs->command);
 	command |= CMD_RUN;
@@ -2445,8 +2431,12 @@
 		xhci_err(xhci, "Error while assigning device slot ID\n");
 		return 0;
 	}
-	/* xhci_alloc_virt_device() does not touch rings; no need to lock */
-	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
+	/* xhci_alloc_virt_device() does not touch rings; no need to lock.
+	 * Use GFP_NOIO, since this function can be called from
+	 * xhci_discover_or_reset_device(), which may be called as part of
+	 * mass storage driver error handling.
+	 */
+	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
 		/* Disable slot, if we can do it without mem alloc */
 		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
 		spin_lock_irqsave(&xhci->lock, flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 170c367..7f236fd 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -436,22 +436,18 @@
 /**
  * struct doorbell_array
  *
+ * Bits  0 -  7: Endpoint target
+ * Bits  8 - 15: RsvdZ
+ * Bits 16 - 31: Stream ID
+ *
  * Section 5.6
  */
 struct xhci_doorbell_array {
 	u32	doorbell[256];
 };
 
-#define	DB_TARGET_MASK		0xFFFFFF00
-#define	DB_STREAM_ID_MASK	0x0000FFFF
-#define	DB_TARGET_HOST		0x0
-#define	DB_STREAM_ID_HOST	0x0
-#define	DB_MASK			(0xff << 8)
-
-/* Endpoint Target - bits 0:7 */
-#define EPI_TO_DB(p)		(((p) + 1) & 0xff)
-#define STREAM_ID_TO_DB(p)	(((p) & 0xffff) << 16)
-
+#define DB_VALUE(ep, stream)	((((ep) + 1) & 0xff) | ((stream) << 16))
+#define DB_VALUE_HOST		0x00000000
 
 /**
  * struct xhci_protocol_caps
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 1732d9b..1616ad1 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -45,7 +45,7 @@
 
 static void change_color(struct usb_led *led)
 {
-	int retval;
+	int retval = 0;
 	unsigned char *buffer;
 
 	buffer = kmalloc(8, GFP_KERNEL);
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 4ff2158..f7a2057 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -776,7 +776,6 @@
 	{ USB_DEVICE(0x0557, 0x2001) },
 	{ USB_DEVICE(0x0729, 0x1284) },
 	{ USB_DEVICE(0x1293, 0x0002) },
-	{ USB_DEVICE(0x1293, 0x0002) },
 	{ USB_DEVICE(0x050d, 0x0002) },
 	{ }						/* Terminating entry */
 };
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index eeba228..9d49d1c 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -404,6 +404,7 @@
 		musb->xceiv->set_power = bfin_musb_set_power;
 
 	musb->isr = blackfin_interrupt;
+	musb->double_buffer_not_ok = true;
 
 	return 0;
 }
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 07cf394..54a8bd1 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -128,12 +128,7 @@
 
 static inline struct musb *dev_to_musb(struct device *dev)
 {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-	/* usbcore insists dev->driver_data is a "struct hcd *" */
-	return hcd_to_musb(dev_get_drvdata(dev));
-#else
 	return dev_get_drvdata(dev);
-#endif
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1876,10 +1871,9 @@
 	musb = kzalloc(sizeof *musb, GFP_KERNEL);
 	if (!musb)
 		return NULL;
-	dev_set_drvdata(dev, musb);
 
 #endif
-
+	dev_set_drvdata(dev, musb);
 	musb->mregs = mbase;
 	musb->ctrl_base = mbase;
 	musb->nIrq = -ENODEV;
@@ -2191,7 +2185,7 @@
 	void __iomem	*base;
 
 	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!iomem || irq == 0)
+	if (!iomem || irq <= 0)
 		return -ENODEV;
 
 	base = ioremap(iomem->start, resource_size(iomem));
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index d0c236f..d74a811 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -488,6 +488,18 @@
 	unsigned		set_address:1;
 	unsigned		test_mode:1;
 	unsigned		softconnect:1;
+	/*
+	 * FIXME: Remove this flag.
+	 *
+	 * This is only added to allow Blackfin to work
+	 * with the current driver. For some unknown reason
+	 * Blackfin doesn't work with double buffering
+	 * and that's enabled by default.
+	 *
+	 * We added this flag to forcefully disable double
+	 * buffering until we get it working.
+	 */
+	unsigned                double_buffer_not_ok:1 __deprecated;
 
 	u8			address;
 	u8			test_mode_nr;
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 916065b..3a97c4e 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -169,6 +169,9 @@
 							dma_addr_t dma_addr,
 							u32 length);
 	int			(*channel_abort)(struct dma_channel *);
+	int			(*is_compatible)(struct dma_channel *channel,
+							u16 maxpacket,
+							void *buf, u32 length);
 };
 
 /* called after channel_program(), may indicate a fault */
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index ed58c6c..2fe3046 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -92,11 +92,33 @@
 
 /* ----------------------------------------------------------------------- */
 
+#define is_buffer_mapped(req) (is_dma_capable() && \
+					(req->map_state != UN_MAPPED))
+
 /* Maps the buffer to dma  */
 
 static inline void map_dma_buffer(struct musb_request *request,
-				struct musb *musb)
+			struct musb *musb, struct musb_ep *musb_ep)
 {
+	int compatible = true;
+	struct dma_controller *dma = musb->dma_controller;
+
+	request->map_state = UN_MAPPED;
+
+	if (!is_dma_capable() || !musb_ep->dma)
+		return;
+
+	/* Check if DMA engine can handle this request.
+	 * DMA code must reject the USB request explicitly.
+	 * Default behaviour is to map the request.
+	 */
+	if (dma->is_compatible)
+		compatible = dma->is_compatible(musb_ep->dma,
+				musb_ep->packet_sz, request->request.buf,
+				request->request.length);
+	if (!compatible)
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		request->request.dma = dma_map_single(
 				musb->controller,
@@ -105,7 +127,7 @@
 				request->tx
 					? DMA_TO_DEVICE
 					: DMA_FROM_DEVICE);
-		request->mapped = 1;
+		request->map_state = MUSB_MAPPED;
 	} else {
 		dma_sync_single_for_device(musb->controller,
 			request->request.dma,
@@ -113,7 +135,7 @@
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
-		request->mapped = 0;
+		request->map_state = PRE_MAPPED;
 	}
 }
 
@@ -121,11 +143,14 @@
 static inline void unmap_dma_buffer(struct musb_request *request,
 				struct musb *musb)
 {
+	if (!is_buffer_mapped(request))
+		return;
+
 	if (request->request.dma == DMA_ADDR_INVALID) {
 		DBG(20, "not unmapping a never mapped buffer\n");
 		return;
 	}
-	if (request->mapped) {
+	if (request->map_state == MUSB_MAPPED) {
 		dma_unmap_single(musb->controller,
 			request->request.dma,
 			request->request.length,
@@ -133,16 +158,15 @@
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
 		request->request.dma = DMA_ADDR_INVALID;
-		request->mapped = 0;
-	} else {
+	} else { /* PRE_MAPPED */
 		dma_sync_single_for_cpu(musb->controller,
 			request->request.dma,
 			request->request.length,
 			request->tx
 				? DMA_TO_DEVICE
 				: DMA_FROM_DEVICE);
-
 	}
+	request->map_state = UN_MAPPED;
 }
 
 /*
@@ -172,8 +196,7 @@
 
 	ep->busy = 1;
 	spin_unlock(&musb->lock);
-	if (is_dma_capable() && ep->dma)
-		unmap_dma_buffer(req, musb);
+	unmap_dma_buffer(req, musb);
 	if (request->status == 0)
 		DBG(5, "%s done request %p,  %d/%d\n",
 				ep->end_point.name, request,
@@ -335,7 +358,7 @@
 			csr);
 
 #ifndef	CONFIG_MUSB_PIO_ONLY
-	if (is_dma_capable() && musb_ep->dma) {
+	if (is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		size_t request_size;
 
@@ -436,8 +459,7 @@
 		 * Unmap the dma buffer back to cpu if dma channel
 		 * programming fails
 		 */
-		if (is_dma_capable() && musb_ep->dma)
-			unmap_dma_buffer(req, musb);
+		unmap_dma_buffer(req, musb);
 
 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
 				(u8 *) (request->buf + request->actual));
@@ -627,7 +649,7 @@
 		return;
 	}
 
-	if (is_cppi_enabled() && musb_ep->dma) {
+	if (is_cppi_enabled() && is_buffer_mapped(req)) {
 		struct dma_controller	*c = musb->dma_controller;
 		struct dma_channel	*channel = musb_ep->dma;
 
@@ -658,7 +680,7 @@
 		len = musb_readw(epio, MUSB_RXCOUNT);
 		if (request->actual < request->length) {
 #ifdef CONFIG_USB_INVENTRA_DMA
-			if (is_dma_capable() && musb_ep->dma) {
+			if (is_buffer_mapped(req)) {
 				struct dma_controller	*c;
 				struct dma_channel	*channel;
 				int			use_dma = 0;
@@ -742,7 +764,7 @@
 			fifo_count = min_t(unsigned, len, fifo_count);
 
 #ifdef	CONFIG_USB_TUSB_OMAP_DMA
-			if (tusb_dma_omap() && musb_ep->dma) {
+			if (tusb_dma_omap() && is_buffer_mapped(req)) {
 				struct dma_controller *c = musb->dma_controller;
 				struct dma_channel *channel = musb_ep->dma;
 				u32 dma_addr = request->dma + request->actual;
@@ -762,7 +784,7 @@
 			 * programming fails. This buffer is mapped if the
 			 * channel allocation is successful
 			 */
-			 if (is_dma_capable() && musb_ep->dma) {
+			 if (is_buffer_mapped(req)) {
 				unmap_dma_buffer(req, musb);
 
 				/*
@@ -989,7 +1011,11 @@
 		/* Set TXMAXP with the FIFO size of the endpoint
 		 * to disable double buffering mode.
 		 */
-		musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		if (musb->double_buffer_not_ok)
+			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
+		else
+			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
+					| (musb_ep->hb_mult << 11));
 
 		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
 		if (musb_readw(regs, MUSB_TXCSR)
@@ -1025,7 +1051,11 @@
 		/* Set RXMAXP with the FIFO size of the endpoint
 		 * to disable double buffering mode.
 		 */
-		musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		if (musb->double_buffer_not_ok)
+			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
+		else
+			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
+					| (musb_ep->hb_mult << 11));
 
 		/* force shared fifo to OUT-only mode */
 		if (hw_ep->is_shared_fifo) {
@@ -1214,10 +1244,7 @@
 	request->epnum = musb_ep->current_epnum;
 	request->tx = musb_ep->is_in;
 
-	if (is_dma_capable() && musb_ep->dma)
-		map_dma_buffer(request, musb);
-	else
-		request->mapped = 0;
+	map_dma_buffer(request, musb, musb_ep);
 
 	spin_lock_irqsave(&musb->lock, lockflags);
 
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index dec8dc0..a55354f 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -35,13 +35,19 @@
 #ifndef __MUSB_GADGET_H
 #define __MUSB_GADGET_H
 
+enum buffer_map_state {
+	UN_MAPPED = 0,
+	PRE_MAPPED,
+	MUSB_MAPPED
+};
+
 struct musb_request {
 	struct usb_request	request;
 	struct musb_ep		*ep;
 	struct musb		*musb;
 	u8 tx;			/* endpoint direction */
 	u8 epnum;
-	u8 mapped;
+	enum buffer_map_state map_state;
 };
 
 static inline struct musb_request *to_musb_request(struct usb_request *req)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 4d5bcb4..0f523d7 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -609,7 +609,7 @@
 	/* Set RXMAXP with the FIFO size of the endpoint
 	 * to disable double buffer mode.
 	 */
-	if (musb->hwvers < MUSB_HWVERS_2000)
+	if (musb->double_buffer_not_ok)
 		musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
 	else
 		musb_writew(ep->regs, MUSB_RXMAXP,
@@ -784,14 +784,13 @@
 		/* protocol/endpoint/interval/NAKlimit */
 		if (epnum) {
 			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
-			if (can_bulk_split(musb, qh->type))
+			if (musb->double_buffer_not_ok)
 				musb_writew(epio, MUSB_TXMAXP,
-					packet_sz
-					| ((hw_ep->max_packet_sz_tx /
-						packet_sz) - 1) << 11);
+						hw_ep->max_packet_sz_tx);
 			else
 				musb_writew(epio, MUSB_TXMAXP,
-					packet_sz);
+						qh->maxpacket |
+						((qh->hb_mult - 1) << 11));
 			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
 		} else {
 			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index f763d62..21056c9 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -94,24 +94,33 @@
 {
 	musb_writew(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_LOW),
-		((u16)((u32) dma_addr & 0xFFFF)));
+		dma_addr);
 	musb_writew(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDR_HIGH),
-		((u16)(((u32) dma_addr >> 16) & 0xFFFF)));
+		(dma_addr >> 16));
 }
 
 static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
 {
-	return musb_readl(mbase,
+	u32 count = musb_readw(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
+
+	count = count << 16;
+
+	count |= musb_readw(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
+
+	return count;
 }
 
 static inline void musb_write_hsdma_count(void __iomem *mbase,
 				u8 bchannel, u32 len)
 {
-	musb_writel(mbase,
+	musb_writew(mbase,
+		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW),len);
+	musb_writew(mbase,
 		MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH),
-		len);
+		(len >> 16));
 }
 
 #endif /* CONFIG_BLACKFIN */
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index 9fb875d..9ffc823 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -103,6 +103,8 @@
 	  required after resetting the hardware and power management.
 	  This driver is required even for peripheral only or host only
 	  mode configurations.
+	  This driver is not supported on boards like trout, which
+	  have an external PHY.
 
 config AB8500_USB
         tristate "AB8500 USB Transceiver Driver"
diff --git a/drivers/usb/otg/nop-usb-xceiv.c b/drivers/usb/otg/nop-usb-xceiv.c
index e70014a..8acf165 100644
--- a/drivers/usb/otg/nop-usb-xceiv.c
+++ b/drivers/usb/otg/nop-usb-xceiv.c
@@ -132,6 +132,8 @@
 
 	platform_set_drvdata(pdev, nop);
 
+	BLOCKING_INIT_NOTIFIER_HEAD(&nop->otg.notifier);
+
 	return 0;
 exit:
 	kfree(nop);
diff --git a/drivers/usb/otg/ulpi.c b/drivers/usb/otg/ulpi.c
index 059d9ac..770d799 100644
--- a/drivers/usb/otg/ulpi.c
+++ b/drivers/usb/otg/ulpi.c
@@ -45,7 +45,7 @@
 /* ULPI hardcoded IDs, used for probing */
 static struct ulpi_info ulpi_ids[] = {
 	ULPI_INFO(ULPI_ID(0x04cc, 0x1504), "NXP ISP1504"),
-	ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB3319"),
+	ULPI_INFO(ULPI_ID(0x0424, 0x0006), "SMSC USB331x"),
 };
 
 static int ulpi_set_otg_flags(struct otg_transceiver *otg)
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 63f7cc4..7b8815d 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -486,12 +486,22 @@
 	if (actual_length >= 4) {
 		struct ch341_private *priv = usb_get_serial_port_data(port);
 		unsigned long flags;
+		u8 prev_line_status = priv->line_status;
 
 		spin_lock_irqsave(&priv->lock, flags);
 		priv->line_status = (~(data[2])) & CH341_BITS_MODEM_STAT;
 		if ((data[1] & CH341_MULT_STAT))
 			priv->multi_status_change = 1;
 		spin_unlock_irqrestore(&priv->lock, flags);
+
+		if ((priv->line_status ^ prev_line_status) & CH341_BIT_DCD) {
+			struct tty_struct *tty = tty_port_tty_get(&port->port);
+			if (tty)
+				usb_serial_handle_dcd_change(port, tty,
+					    priv->line_status & CH341_BIT_DCD);
+			tty_kref_put(tty);
+		}
+
 		wake_up_interruptible(&priv->delta_msr_wait);
 	}
 
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 8d7731d..735ea03 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -49,7 +49,6 @@
 static void cp210x_break_ctl(struct tty_struct *, int);
 static int cp210x_startup(struct usb_serial *);
 static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
-static int cp210x_carrier_raised(struct usb_serial_port *p);
 
 static int debug;
 
@@ -87,7 +86,6 @@
 	{ USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */
 	{ USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */
 	{ USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */
-	{ USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
 	{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
 	{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
 	{ USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
@@ -110,7 +108,9 @@
 	{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
 	{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
 	{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
+	{ USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
 	{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
+	{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
 	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
 	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
@@ -165,8 +165,7 @@
 	.tiocmget 		= cp210x_tiocmget,
 	.tiocmset		= cp210x_tiocmset,
 	.attach			= cp210x_startup,
-	.dtr_rts		= cp210x_dtr_rts,
-	.carrier_raised		= cp210x_carrier_raised
+	.dtr_rts		= cp210x_dtr_rts
 };
 
 /* Config request types */
@@ -765,15 +764,6 @@
 	return result;
 }
 
-static int cp210x_carrier_raised(struct usb_serial_port *p)
-{
-	unsigned int control;
-	cp210x_get_config(p, CP210X_GET_MDMSTS, &control, 1);
-	if (control & CONTROL_DCD)
-		return 1;
-	return 0;
-}
-
 static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
 {
 	struct usb_serial_port *port = tty->driver_data;
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index b92070c..666e5a6 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -455,7 +455,6 @@
 static int digi_chars_in_buffer(struct tty_struct *tty);
 static int digi_open(struct tty_struct *tty, struct usb_serial_port *port);
 static void digi_close(struct usb_serial_port *port);
-static int digi_carrier_raised(struct usb_serial_port *port);
 static void digi_dtr_rts(struct usb_serial_port *port, int on);
 static int digi_startup_device(struct usb_serial *serial);
 static int digi_startup(struct usb_serial *serial);
@@ -511,7 +510,6 @@
 	.open =				digi_open,
 	.close =			digi_close,
 	.dtr_rts =			digi_dtr_rts,
-	.carrier_raised =		digi_carrier_raised,
 	.write =			digi_write,
 	.write_room =			digi_write_room,
 	.write_bulk_callback = 		digi_write_bulk_callback,
@@ -1339,14 +1337,6 @@
 	digi_set_modem_signals(port, on * (TIOCM_DTR|TIOCM_RTS), 1);
 }
 
-static int digi_carrier_raised(struct usb_serial_port *port)
-{
-	struct digi_port *priv = usb_get_serial_port_data(port);
-	if (priv->dp_modem_signals & TIOCM_CD)
-		return 1;
-	return 0;
-}
-
 static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
 	int ret;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index a2668d0..f349a36 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -100,6 +100,7 @@
 static int   ftdi_jtag_probe(struct usb_serial *serial);
 static int   ftdi_mtxorb_hack_setup(struct usb_serial *serial);
 static int   ftdi_NDI_device_setup(struct usb_serial *serial);
+static int   ftdi_stmclite_probe(struct usb_serial *serial);
 static void  ftdi_USB_UIRT_setup(struct ftdi_private *priv);
 static void  ftdi_HE_TIRA1_setup(struct ftdi_private *priv);
 
@@ -123,6 +124,10 @@
 	.port_probe = ftdi_HE_TIRA1_setup,
 };
 
+static struct ftdi_sio_quirk ftdi_stmclite_quirk = {
+	.probe	= ftdi_stmclite_probe,
+};
+
 /*
  * The 8U232AM has the same API as the sio except for:
  * - it can support MUCH higher baudrates; up to:
@@ -616,6 +621,7 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
 	{ USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
+	{ USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) },
 	{ USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
 	{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
 	{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
@@ -676,7 +682,17 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) },
-	{ USB_DEVICE(ICOM_ID1_VID, ICOM_ID1_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2C2_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2D_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2VT_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2VR_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVT_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP4KVR_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVT_PID) },
+	{ USB_DEVICE(ICOM_VID, ICOM_ID_RP2KVR_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
@@ -800,6 +816,8 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
 	{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+	{ USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
+		.driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
 	{ },					/* Optional parameter entry */
 	{ }					/* Terminating entry */
 };
@@ -1699,6 +1717,25 @@
 }
 
 /*
+ * The first and second ports on STMCLite adapters are reserved for the JTAG
+ * interface and the fourth port for PIO.
+ */
+static int ftdi_stmclite_probe(struct usb_serial *serial)
+{
+	struct usb_device *udev = serial->dev;
+	struct usb_interface *interface = serial->interface;
+
+	dbg("%s", __func__);
+
+	if (interface == udev->actconfig->interface[2])
+		return 0;
+
+	dev_info(&udev->dev, "Ignoring serial port reserved for JTAG\n");
+
+	return -ENODEV;
+}
+
+/*
  * The Matrix Orbital VK204-25-USB has an invalid IN endpoint.
  * We have to correct it if we want to read from it.
  */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index bf08672..117e8e6 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -518,6 +518,12 @@
 #define RATOC_PRODUCT_ID_USB60F	0xb020
 
 /*
+ * Acton Research Corp.
+ */
+#define ACTON_VID		0x0647	/* Vendor ID */
+#define ACTON_SPECTRAPRO_PID	0x0100
+
+/*
  * Contec products (http://www.contec.com)
  * Submitted by Daniel Sangorrin
  */
@@ -569,11 +575,23 @@
 #define OCT_US101_PID		0x0421	/* OCT US101 USB to RS-232 */
 
 /*
- * Icom ID-1 digital transceiver
+ * Definitions for Icom Inc. devices
  */
-
-#define ICOM_ID1_VID            0x0C26
-#define ICOM_ID1_PID            0x0004
+#define ICOM_VID		0x0C26 /* Icom vendor ID */
+/* Note: ID-1 is a communications transceiver for HAM-radio operators */
+#define ICOM_ID_1_PID		0x0004 /* ID-1 USB to RS-232 */
+/* Note: OPC is an optional cable to connect an Icom transceiver */
+#define ICOM_OPC_U_UC_PID	0x0018 /* OPC-478UC, OPC-1122U cloning cable */
+/* Note: ID-RP* devices are Icom Repeater Devices for HAM-radio */
+#define ICOM_ID_RP2C1_PID	0x0009 /* ID-RP2C Asset 1 to RS-232 */
+#define ICOM_ID_RP2C2_PID	0x000A /* ID-RP2C Asset 2 to RS-232 */
+#define ICOM_ID_RP2D_PID	0x000B /* ID-RP2D configuration port */
+#define ICOM_ID_RP2VT_PID	0x000C /* ID-RP2V Transmit config port */
+#define ICOM_ID_RP2VR_PID	0x000D /* ID-RP2V Receive config port */
+#define ICOM_ID_RP4KVT_PID	0x0010 /* ID-RP4000V Transmit config port */
+#define ICOM_ID_RP4KVR_PID	0x0011 /* ID-RP4000V Receive config port */
+#define ICOM_ID_RP2KVT_PID	0x0012 /* ID-RP2000V Transmit config port */
+#define ICOM_ID_RP2KVR_PID	0x0013 /* ID-RP2000V Receive config port */
 
 /*
  * GN Otometrics (http://www.otometrics.com)
@@ -1022,6 +1040,12 @@
 #define WHT_PID			0x0004 /* Wireless Handheld Terminal */
 
 /*
+ * STMicroelectronics
+ */
+#define ST_VID			0x0483
+#define ST_STMCLT1030_PID	0x3747 /* ST Micro Connect Lite STMCLT1030 */
+
+/*
  * Papouch products (http://www.papouch.com/)
  * Submitted by Folkert van Heusden
  */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index e6833e2..e4db5ad 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -479,6 +479,26 @@
 }
 EXPORT_SYMBOL_GPL(usb_serial_handle_break);
 
+/**
+ *	usb_serial_handle_dcd_change - handle a change of carrier detect state
+ *	@usb_port: usb_serial_port structure for the open port
+ *	@tty: tty_struct structure for the port
+ *	@status: new carrier detect status, nonzero if active
+ */
+void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
+				struct tty_struct *tty, unsigned int status)
+{
+	struct tty_port *port = &usb_port->port;
+
+	dbg("%s - port %d, status %d", __func__, usb_port->number, status);
+
+	if (status)
+		wake_up_interruptible(&port->open_wait);
+	else if (tty && !C_CLOCAL(tty))
+		tty_hangup(tty);
+}
+EXPORT_SYMBOL_GPL(usb_serial_handle_dcd_change);
+
 int usb_serial_generic_resume(struct usb_serial *serial)
 {
 	struct usb_serial_port *port;
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index cd769ef..3b246d9 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2889,8 +2889,8 @@
 
 	dbg("%s %d.%d.%d", fw_info, rec->data[0], rec->data[1], build);
 
-	edge_serial->product_info.FirmwareMajorVersion = fw->data[0];
-	edge_serial->product_info.FirmwareMinorVersion = fw->data[1];
+	edge_serial->product_info.FirmwareMajorVersion = rec->data[0];
+	edge_serial->product_info.FirmwareMinorVersion = rec->data[1];
 	edge_serial->product_info.FirmwareBuildNumber = cpu_to_le16(build);
 
 	for (rec = ihex_next_binrec(rec); rec;
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index 6ab2a3f..178b22e 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -199,6 +199,7 @@
 		.name		= "epic",
 	},
 	.description		= "EPiC device",
+	.usb_driver		= &io_driver,
 	.id_table		= Epic_port_id_table,
 	.num_ports		= 1,
 	.open			= edge_open,
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 12ed594..99b97c0 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1275,6 +1275,7 @@
 		   .name = "iuu_phoenix",
 		   },
 	.id_table = id_table,
+	.usb_driver = &iuu_driver,
 	.num_ports = 1,
 	.bulk_in_size = 512,
 	.bulk_out_size = 512,
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 2d8baf6..ce134dc 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -546,6 +546,7 @@
 		.name		= "keyspan_no_firm",
 	},
 	.description		= "Keyspan - (without firmware)",
+	.usb_driver		= &keyspan_driver,
 	.id_table		= keyspan_pre_ids,
 	.num_ports		= 1,
 	.attach			= keyspan_fake_startup,
@@ -557,6 +558,7 @@
 		.name		= "keyspan_1",
 	},
 	.description		= "Keyspan 1 port adapter",
+	.usb_driver		= &keyspan_driver,
 	.id_table		= keyspan_1port_ids,
 	.num_ports		= 1,
 	.open			= keyspan_open,
@@ -579,6 +581,7 @@
 		.name		= "keyspan_2",
 	},
 	.description		= "Keyspan 2 port adapter",
+	.usb_driver		= &keyspan_driver,
 	.id_table		= keyspan_2port_ids,
 	.num_ports		= 2,
 	.open			= keyspan_open,
@@ -601,6 +604,7 @@
 		.name		= "keyspan_4",
 	},
 	.description		= "Keyspan 4 port adapter",
+	.usb_driver		= &keyspan_driver,
 	.id_table		= keyspan_4port_ids,
 	.num_ports		= 4,
 	.open			= keyspan_open,
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index a10dd56..554a869 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -679,22 +679,6 @@
 	}
 }
 
-static int keyspan_pda_carrier_raised(struct usb_serial_port *port)
-{
-	struct usb_serial *serial = port->serial;
-	unsigned char modembits;
-
-	/* If we can read the modem status and the DCD is low then
-	   carrier is not raised yet */
-	if (keyspan_pda_get_modem_info(serial, &modembits) >= 0) {
-		if (!(modembits & (1>>6)))
-			return 0;
-	}
-	/* Carrier raised, or we failed (eg disconnected) so
-	   progress accordingly */
-	return 1;
-}
-
 
 static int keyspan_pda_open(struct tty_struct *tty,
 					struct usb_serial_port *port)
@@ -881,7 +865,6 @@
 	.id_table =		id_table_std,
 	.num_ports =		1,
 	.dtr_rts =		keyspan_pda_dtr_rts,
-	.carrier_raised	=	keyspan_pda_carrier_raised,
 	.open =			keyspan_pda_open,
 	.close =		keyspan_pda_close,
 	.write =		keyspan_pda_write,
diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
index cf17183..653465f 100644
--- a/drivers/usb/serial/moto_modem.c
+++ b/drivers/usb/serial/moto_modem.c
@@ -44,6 +44,7 @@
 		.name =		"moto-modem",
 	},
 	.id_table =		id_table,
+	.usb_driver =		&moto_driver,
 	.num_ports =		1,
 };
 
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7487782..5f46838 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -382,7 +382,16 @@
 #define HAIER_VENDOR_ID				0x201e
 #define HAIER_PRODUCT_CE100			0x2009
 
-#define CINTERION_VENDOR_ID			0x0681
+/* Cinterion (formerly Siemens) products */
+#define SIEMENS_VENDOR_ID				0x0681
+#define CINTERION_VENDOR_ID				0x1e2d
+#define CINTERION_PRODUCT_HC25_MDM		0x0047
+#define CINTERION_PRODUCT_HC25_MDMNET	0x0040
+#define CINTERION_PRODUCT_HC28_MDM		0x004C
+#define CINTERION_PRODUCT_HC28_MDMNET	0x004A /* same for HC28J */
+#define CINTERION_PRODUCT_EU3_E			0x0051
+#define CINTERION_PRODUCT_EU3_P			0x0052
+#define CINTERION_PRODUCT_PH8			0x0053
 
 /* Olivetti products */
 #define OLIVETTI_VENDOR_ID			0x0b3c
@@ -944,7 +953,17 @@
 	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
 	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
 	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
-	{ USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) },
+	/* Cinterion */
+	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
+	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
+	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
+	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
+	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+
 	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
 	{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
 	{ USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 5be866b..7361320 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -157,6 +157,7 @@
 		.name =		"oti6858",
 	},
 	.id_table =		id_table,
+	.usb_driver =		&oti6858_driver,
 	.num_ports =		1,
 	.open =			oti6858_open,
 	.close =		oti6858_close,
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 8ae4c6c..08c9181 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -50,6 +50,7 @@
 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_GPRS) },
 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
+	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
@@ -677,9 +678,11 @@
 {
 
 	struct pl2303_private *priv = usb_get_serial_port_data(port);
+	struct tty_struct *tty;
 	unsigned long flags;
 	u8 status_idx = UART_STATE;
 	u8 length = UART_STATE + 1;
+	u8 prev_line_status;
 	u16 idv, idp;
 
 	idv = le16_to_cpu(port->serial->dev->descriptor.idVendor);
@@ -701,11 +704,20 @@
 
 	/* Save off the uart status for others to look at */
 	spin_lock_irqsave(&priv->lock, flags);
+	prev_line_status = priv->line_status;
 	priv->line_status = data[status_idx];
 	spin_unlock_irqrestore(&priv->lock, flags);
 	if (priv->line_status & UART_BREAK_ERROR)
 		usb_serial_handle_break(port);
 	wake_up_interruptible(&priv->delta_msr_wait);
+
+	tty = tty_port_tty_get(&port->port);
+	if (!tty)
+		return;
+	if ((priv->line_status ^ prev_line_status) & UART_DCD)
+		usb_serial_handle_dcd_change(port, tty,
+				priv->line_status & UART_DCD);
+	tty_kref_put(tty);
 }
 
 static void pl2303_read_int_callback(struct urb *urb)
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 43eb9bd..1b025f7 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -21,6 +21,7 @@
 #define PL2303_PRODUCT_ID_MMX		0x0612
 #define PL2303_PRODUCT_ID_GPRS		0x0609
 #define PL2303_PRODUCT_ID_HCR331	0x331a
+#define PL2303_PRODUCT_ID_MOTOROLA	0x0307
 
 #define ATEN_VENDOR_ID		0x0557
 #define ATEN_VENDOR_ID2		0x0547
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
index 214a3e5..30b73e6 100644
--- a/drivers/usb/serial/qcaux.c
+++ b/drivers/usb/serial/qcaux.c
@@ -36,6 +36,7 @@
 #define UTSTARCOM_PRODUCT_UM175_V1		0x3712
 #define UTSTARCOM_PRODUCT_UM175_V2		0x3714
 #define UTSTARCOM_PRODUCT_UM175_ALLTEL		0x3715
+#define PANTECH_PRODUCT_UML290_VZW		0x3718
 
 /* CMOTECH devices */
 #define CMOTECH_VENDOR_ID			0x16d8
@@ -66,6 +67,7 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) },
 	{ },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
@@ -84,6 +86,7 @@
 		.name =		"qcaux",
 	},
 	.id_table =		id_table,
+	.usb_driver =		&qcaux_driver,
 	.num_ports =		1,
 };
 
diff --git a/drivers/usb/serial/siemens_mpi.c b/drivers/usb/serial/siemens_mpi.c
index cb8195c..74cd4cc 100644
--- a/drivers/usb/serial/siemens_mpi.c
+++ b/drivers/usb/serial/siemens_mpi.c
@@ -42,6 +42,7 @@
 		.name =		"siemens_mpi",
 	},
 	.id_table =		id_table,
+	.usb_driver =		&siemens_usb_mpi_driver,
 	.num_ports =		1,
 };
 
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 765aa98..cbfb70b 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -133,7 +133,7 @@
 
 /* how come ??? */
 #define UART_STATE			0x08
-#define UART_STATE_TRANSIENT_MASK	0x74
+#define UART_STATE_TRANSIENT_MASK	0x75
 #define UART_DCD			0x01
 #define UART_DSR			0x02
 #define UART_BREAK_ERROR		0x04
@@ -525,6 +525,10 @@
 		/* overrun is special, not associated with a char */
 		if (status & UART_OVERRUN_ERROR)
 			tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+
+		if (status & UART_DCD)
+			usb_serial_handle_dcd_change(port, tty,
+				   priv->line_status & MSR_STATUS_LINE_DCD);
 	}
 
 	tty_insert_flip_string_fixed_flag(tty, data, tty_flag,
@@ -645,6 +649,7 @@
 		.name =		"SPCP8x5",
 	},
 	.id_table		= id_table,
+	.usb_driver		= &spcp8x5_driver,
 	.num_ports		= 1,
 	.open 			= spcp8x5_open,
 	.dtr_rts		= spcp8x5_dtr_rts,
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index b2902f3..a910004 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -369,9 +369,9 @@
 
 static void __exit ti_exit(void)
 {
+	usb_deregister(&ti_usb_driver);
 	usb_serial_deregister(&ti_1port_device);
 	usb_serial_deregister(&ti_2port_device);
-	usb_deregister(&ti_usb_driver);
 }
 
 
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 6954de5..546a521 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1344,11 +1344,15 @@
 		return -ENODEV;
 
 	fixup_generic(driver);
-	if (driver->usb_driver)
-		driver->usb_driver->supports_autosuspend = 1;
 
 	if (!driver->description)
 		driver->description = driver->driver.name;
+	if (!driver->usb_driver) {
+		WARN(1, "Serial driver %s has no usb_driver\n",
+				driver->description);
+		return -EINVAL;
+	}
+	driver->usb_driver->supports_autosuspend = 1;
 
 	/* Add this device to our list of devices */
 	mutex_lock(&table_lock);
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index f2ed6a3..95a8214 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -75,6 +75,7 @@
 		.name =		"debug",
 	},
 	.id_table =		id_table,
+	.usb_driver =		&debug_driver,
 	.num_ports =		1,
 	.bulk_out_size =	USB_DEBUG_MAX_PACKET_SIZE,
 	.break_ctl =		usb_debug_break_ctl,
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index c854fde..2c85530 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -31,4 +31,9 @@
 		"Cypress ISD-300LP",
 		USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
 
+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x9999,
+		"Super Top",
+		"USB 2.0  SATA BRIDGE",
+		USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
+
 #endif /* defined(CONFIG_USB_STORAGE_CYPRESS_ATACB) || ... */
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index fcc1e32..c1602b8 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1044,6 +1044,15 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_BULK32),
 
+/* Reported by <ttkspam@free.fr>
+ * The device reports a vendor-specific device class, requiring an
+ * explicit vendor/product match.
+ */
+UNUSUAL_DEV(  0x0851, 0x1542, 0x0002, 0x0002,
+		"MagicPixel",
+		"FW_Omega2",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL, 0),
+
 /* Andrew Lunn <andrew@lunn.ch>
  * PanDigital Digital Picture Frame. Does not like ALLOW_MEDIUM_REMOVAL
  * on LUN 4.
@@ -1388,6 +1397,13 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_IGNORE_RESIDUE ),
 
+/* Submitted by Nick Holloway */
+UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
+		"VTech",
+		"Kidizoom",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_FIX_CAPACITY ),
+
 /* Reported by Michael Stattmann <michael@stattmann.com> */
 UNUSUAL_DEV(  0x0fce, 0xd008, 0x0000, 0x0000,
 		"Sony Ericsson",
@@ -1872,6 +1888,22 @@
 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
 		US_FL_NO_READ_DISC_INFO ),
 
+/* Patch by Richard Schütz <r.schtz@t-online.de>
+ * This external hard drive enclosure uses a JMicron chip which
+ * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
+UNUSUAL_DEV(  0x1e68, 0x001b, 0x0000, 0x0000,
+		"TrekStor GmbH & Co. KG",
+		"DataStation maxi g.u",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
+
+/* Reported by Jasper Mackenzie <scarletpimpernal@hotmail.com> */
+UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
+		"Coby Electronics",
+		"MP3 Player",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
+
 UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
 		"ST",
 		"2A",
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9b3ca10..f616cef 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -128,8 +128,7 @@
 	size_t hdr_size;
 	struct socket *sock;
 
-	/* TODO: check that we are running from vhost_worker?
-	 * Not sure it's worth it, it's straight-forward enough. */
+	/* TODO: check that we are running from vhost_worker? */
 	sock = rcu_dereference_check(vq->private_data, 1);
 	if (!sock)
 		return;
@@ -306,7 +305,8 @@
 	size_t len, total_len = 0;
 	int err;
 	size_t hdr_size;
-	struct socket *sock = rcu_dereference(vq->private_data);
+	/* TODO: check that we are running from vhost_worker? */
+	struct socket *sock = rcu_dereference_check(vq->private_data, 1);
 	if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
 		return;
 
@@ -415,7 +415,8 @@
 	int err, headcount;
 	size_t vhost_hlen, sock_hlen;
 	size_t vhost_len, sock_len;
-	struct socket *sock = rcu_dereference(vq->private_data);
+	/* TODO: check that we are running from vhost_worker? */
+	struct socket *sock = rcu_dereference_check(vq->private_data, 1);
 	if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
 		return;
 
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 2af44b7..b3363ae 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -173,9 +173,9 @@
 {
 	unsigned acked_features;
 
-	acked_features =
-		rcu_dereference_index_check(dev->acked_features,
-					    lockdep_is_held(&dev->mutex));
+	/* TODO: check that we are running from vhost_worker or dev mutex is
+	 * held? */
+	acked_features = rcu_dereference_index_check(dev->acked_features, 1);
 	return acked_features & (1 << bit);
 }
 
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index d916ac0..6bafb51b 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1227,7 +1227,7 @@
 
 config FB_INTEL
 	tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support (EXPERIMENTAL)"
-	depends on EXPERIMENTAL && FB && PCI && X86 && AGP_INTEL && EMBEDDED
+	depends on EXPERIMENTAL && FB && PCI && X86 && AGP_INTEL && EXPERT
 	select FB_MODE_HELPERS
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
diff --git a/drivers/video/arkfb.c b/drivers/video/arkfb.c
index d583bea..391ac93 100644
--- a/drivers/video/arkfb.c
+++ b/drivers/video/arkfb.c
@@ -23,7 +23,7 @@
 #include <linux/svga.h>
 #include <linux/init.h>
 #include <linux/pci.h>
-#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */
+#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
 #include <video/vga.h>
 
 #ifdef CONFIG_MTRR
@@ -1091,12 +1091,12 @@
 
 	dev_info(info->device, "suspend\n");
 
-	acquire_console_sem();
+	console_lock();
 	mutex_lock(&(par->open_lock));
 
 	if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
 		mutex_unlock(&(par->open_lock));
-		release_console_sem();
+		console_unlock();
 		return 0;
 	}
 
@@ -1107,7 +1107,7 @@
 	pci_set_power_state(dev, pci_choose_state(dev, state));
 
 	mutex_unlock(&(par->open_lock));
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
@@ -1122,7 +1122,7 @@
 
 	dev_info(info->device, "resume\n");
 
-	acquire_console_sem();
+	console_lock();
 	mutex_lock(&(par->open_lock));
 
 	if (par->ref_count == 0)
@@ -1141,7 +1141,7 @@
 
 fail:
 	mutex_unlock(&(par->open_lock));
-	release_console_sem();
+	console_unlock();
 	return 0;
 }
 #else
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index dd9de2e..4cb6a57 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1860,11 +1860,11 @@
 {
         struct aty128fb_par *par = data;
 
-	if (try_acquire_console_sem())
+	if (!console_trylock())
 		return;
 	pci_restore_state(par->pdev);
 	aty128_do_resume(par->pdev);
-	release_console_sem();
+	console_unlock();
 }
 #endif /* CONFIG_PPC_PMAC */
 
@@ -2438,7 +2438,7 @@
 
 	printk(KERN_DEBUG "aty128fb: suspending...\n");
 	
-	acquire_console_sem();
+	console_lock();
 
 	fb_set_suspend(info, 1);
 
@@ -2470,7 +2470,7 @@
 	if (state.event != PM_EVENT_ON)
 		aty128_set_suspend(par, 1);
 
-	release_console_sem();
+	console_unlock();
 
 	pdev->dev.power.power_state = state;
 
@@ -2527,9 +2527,9 @@
 {
 	int rc;
 
-	acquire_console_sem();
+	console_lock();
 	rc = aty128_do_resume(pdev);
-	release_console_sem();
+	console_unlock();
 
 	return rc;
 }
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 767ab4f..94e293f 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2069,7 +2069,7 @@
 	if (state.event == pdev->dev.power.power_state.event)
 		return 0;
 
-	acquire_console_sem();
+	console_lock();
 
 	fb_set_suspend(info, 1);
 
@@ -2097,14 +2097,14 @@
 		par->lock_blank = 0;
 		atyfb_blank(FB_BLANK_UNBLANK, info);
 		fb_set_suspend(info, 0);
-		release_console_sem();
+		console_unlock();
 		return -EIO;
 	}
 #else
 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 #endif
 
-	release_console_sem();
+	console_unlock();
 
 	pdev->dev.power.power_state = state;
 
@@ -2133,7 +2133,7 @@
 	if (pdev->dev.power.power_state.event == PM_EVENT_ON)
 		return 0;
 
-	acquire_console_sem();
+	console_lock();
 
 	/*
 	 * PCI state will have been restored by the core, so
@@ -2161,7 +2161,7 @@
 	par->lock_blank = 0;
 	atyfb_blank(FB_BLANK_UNBLANK, info);
 
-	release_console_sem();
+	console_unlock();
 
 	pdev->dev.power.power_state = PMSG_ON;
 
diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c
index c4e1764..92bda58 100644
--- a/drivers/video/aty/radeon_pm.c
+++ b/drivers/video/aty/radeon_pm.c
@@ -2626,7 +2626,7 @@
 		goto done;
 	}
 
-	acquire_console_sem();
+	console_lock();
 
 	fb_set_suspend(info, 1);
 
@@ -2690,7 +2690,7 @@
 	if (rinfo->pm_mode & radeon_pm_d2)
 		radeon_set_suspend(rinfo, 1);
 
-	release_console_sem();
+	console_unlock();
 
  done:
 	pdev->dev.power.power_state = mesg;
@@ -2715,10 +2715,10 @@
 		return 0;
 
 	if (rinfo->no_schedule) {
-		if (try_acquire_console_sem())
+		if (!console_trylock())
 			return 0;
 	} else
-		acquire_console_sem();
+		console_lock();
 
 	printk(KERN_DEBUG "radeonfb (%s): resuming from state: %d...\n",
 	       pci_name(pdev), pdev->dev.power.power_state.event);
@@ -2783,7 +2783,7 @@
 	pdev->dev.power.power_state = PMSG_ON;
 
  bail:
-	release_console_sem();
+	console_unlock();
 
 	return rc;
 }
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index c789c46..b224396 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -21,7 +21,7 @@
 #define MAX_BRIGHTNESS		(0xFF)
 #define MIN_BRIGHTNESS		(0)
 
-#define CURRENT_MASK		(0x1F << 1)
+#define CURRENT_BITMASK		(0x1F << 1)
 
 struct pm860x_backlight_data {
 	struct pm860x_chip *chip;
@@ -85,7 +85,7 @@
 	if ((data->current_brightness == 0) && brightness) {
 		if (data->iset) {
 			ret = pm860x_set_bits(data->i2c, wled_idc(data->port),
-					      CURRENT_MASK, data->iset);
+					      CURRENT_BITMASK, data->iset);
 			if (ret < 0)
 				goto out;
 		}
diff --git a/drivers/video/bf537-lq035.c b/drivers/video/bf537-lq035.c
index 18c5078..47c21fb 100644
--- a/drivers/video/bf537-lq035.c
+++ b/drivers/video/bf537-lq035.c
@@ -696,6 +696,7 @@
 {
 	struct backlight_properties props;
 	dma_addr_t dma_handle;
+	int ret;
 
 	if (request_dma(CH_PPI, KBUILD_MODNAME)) {
 		pr_err("couldn't request PPI DMA\n");
@@ -704,17 +705,16 @@
 
 	if (request_ports()) {
 		pr_err("couldn't request gpio port\n");
-		free_dma(CH_PPI);
-		return -EFAULT;
+		ret = -EFAULT;
+		goto out_ports;
 	}
 
 	fb_buffer = dma_alloc_coherent(NULL, TOTAL_VIDEO_MEM_SIZE,
 				       &dma_handle, GFP_KERNEL);
 	if (fb_buffer == NULL) {
 		pr_err("couldn't allocate dma buffer\n");
-		free_dma(CH_PPI);
-		free_ports();
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_dma_coherent;
 	}
 
 	if (L1_DATA_A_LENGTH)
@@ -725,10 +725,8 @@
 
 	if (dma_desc_table == NULL) {
 		pr_err("couldn't allocate dma descriptor\n");
-		free_dma(CH_PPI);
-		free_ports();
-		dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_table;
 	}
 
 	bfin_lq035_fb.screen_base = (void *)fb_buffer;
@@ -771,31 +769,21 @@
 	bfin_lq035_fb.pseudo_palette = kzalloc(sizeof(u32) * 16, GFP_KERNEL);
 	if (bfin_lq035_fb.pseudo_palette == NULL) {
 		pr_err("failed to allocate pseudo_palette\n");
-		free_dma(CH_PPI);
-		free_ports();
-		dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out_palette;
 	}
 
 	if (fb_alloc_cmap(&bfin_lq035_fb.cmap, NBR_PALETTE, 0) < 0) {
 		pr_err("failed to allocate colormap (%d entries)\n",
 			NBR_PALETTE);
-		free_dma(CH_PPI);
-		free_ports();
-		dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
-		kfree(bfin_lq035_fb.pseudo_palette);
-		return -EFAULT;
+		ret = -EFAULT;
+		goto out_cmap;
 	}
 
 	if (register_framebuffer(&bfin_lq035_fb) < 0) {
 		pr_err("unable to register framebuffer\n");
-		free_dma(CH_PPI);
-		free_ports();
-		dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
-		fb_buffer = NULL;
-		kfree(bfin_lq035_fb.pseudo_palette);
-		fb_dealloc_cmap(&bfin_lq035_fb.cmap);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out_reg;
 	}
 
 	i2c_add_driver(&ad5280_driver);
@@ -807,11 +795,31 @@
 
 	lcd_dev = lcd_device_register(KBUILD_MODNAME, &pdev->dev, NULL,
 				      &bfin_lcd_ops);
+	if (IS_ERR(lcd_dev)) {
+		pr_err("unable to register lcd\n");
+		ret = PTR_ERR(lcd_dev);
+		goto out_lcd;
+	}
 	lcd_dev->props.max_contrast = 255,
 
 	pr_info("initialized");
 
 	return 0;
+out_lcd:
+	unregister_framebuffer(&bfin_lq035_fb);
+out_reg:
+	fb_dealloc_cmap(&bfin_lq035_fb.cmap);
+out_cmap:
+	kfree(bfin_lq035_fb.pseudo_palette);
+out_palette:
+out_table:
+	dma_free_coherent(NULL, TOTAL_VIDEO_MEM_SIZE, fb_buffer, 0);
+	fb_buffer = NULL;
+out_dma_coherent:
+	free_ports();
+out_ports:
+	free_dma(CH_PPI);
+	return ret;
 }
 
 static int __devexit bfin_lq035_remove(struct platform_device *pdev)
diff --git a/drivers/video/chipsfb.c b/drivers/video/chipsfb.c
index d637e1f..cff742a 100644
--- a/drivers/video/chipsfb.c
+++ b/drivers/video/chipsfb.c
@@ -460,10 +460,10 @@
 	if (!(state.event & PM_EVENT_SLEEP))
 		goto done;
 
-	acquire_console_sem();
+	console_lock();
 	chipsfb_blank(1, p);
 	fb_set_suspend(p, 1);
-	release_console_sem();
+	console_unlock();
  done:
 	pdev->dev.power.power_state = state;
 	return 0;
@@ -473,10 +473,10 @@
 {
         struct fb_info *p = pci_get_drvdata(pdev);
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(p, 0);
 	chipsfb_blank(0, p);
-	release_console_sem();
+	console_unlock();
 
 	pdev->dev.power.power_state = PMSG_ON;
 	return 0;
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 5a35f22..2209e35 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -5,7 +5,7 @@
 menu "Console display driver support"
 
 config VGA_CONSOLE
-	bool "VGA text console" if EMBEDDED || !X86
+	bool "VGA text console" if EXPERT || !X86
 	depends on !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !SUPERH && !BLACKFIN && !AVR32 && !MN10300 && (!ARM || ARCH_FOOTBRIDGE || ARCH_INTEGRATOR || ARCH_NETWINDER)
 	default y
 	help
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 7ccc967..9c092b8 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -375,14 +375,14 @@
 	int c;
 	int mode;
 
-	acquire_console_sem();
+	console_lock();
 	if (ops && ops->currcon != -1)
 		vc = vc_cons[ops->currcon].d;
 
 	if (!vc || !CON_IS_VISIBLE(vc) ||
  	    registered_fb[con2fb_map[vc->vc_num]] != info ||
 	    vc->vc_deccm != 1) {
-		release_console_sem();
+		console_unlock();
 		return;
 	}
 
@@ -392,7 +392,7 @@
 		CM_ERASE : CM_DRAW;
 	ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1),
 		    get_color(vc, info, c, 0));
-	release_console_sem();
+	console_unlock();
 }
 
 static void cursor_timer_handler(unsigned long dev_addr)
@@ -836,7 +836,7 @@
 
 	found = search_fb_in_map(newidx);
 
-	acquire_console_sem();
+	console_lock();
 	con2fb_map[unit] = newidx;
 	if (!err && !found)
  		err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
@@ -863,7 +863,7 @@
 	if (!search_fb_in_map(info_idx))
 		info_idx = newidx;
 
-	release_console_sem();
+	console_unlock();
  	return err;
 }
 
@@ -3321,7 +3321,7 @@
 	if (fbcon_has_exited)
 		return count;
 
-	acquire_console_sem();
+	console_lock();
 	idx = con2fb_map[fg_console];
 
 	if (idx == -1 || registered_fb[idx] == NULL)
@@ -3331,7 +3331,7 @@
 	rotate = simple_strtoul(buf, last, 0);
 	fbcon_rotate(info, rotate);
 err:
-	release_console_sem();
+	console_unlock();
 	return count;
 }
 
@@ -3346,7 +3346,7 @@
 	if (fbcon_has_exited)
 		return count;
 
-	acquire_console_sem();
+	console_lock();
 	idx = con2fb_map[fg_console];
 
 	if (idx == -1 || registered_fb[idx] == NULL)
@@ -3356,7 +3356,7 @@
 	rotate = simple_strtoul(buf, last, 0);
 	fbcon_rotate_all(info, rotate);
 err:
-	release_console_sem();
+	console_unlock();
 	return count;
 }
 
@@ -3369,7 +3369,7 @@
 	if (fbcon_has_exited)
 		return 0;
 
-	acquire_console_sem();
+	console_lock();
 	idx = con2fb_map[fg_console];
 
 	if (idx == -1 || registered_fb[idx] == NULL)
@@ -3378,7 +3378,7 @@
 	info = registered_fb[idx];
 	rotate = fbcon_get_rotate(info);
 err:
-	release_console_sem();
+	console_unlock();
 	return snprintf(buf, PAGE_SIZE, "%d\n", rotate);
 }
 
@@ -3392,7 +3392,7 @@
 	if (fbcon_has_exited)
 		return 0;
 
-	acquire_console_sem();
+	console_lock();
 	idx = con2fb_map[fg_console];
 
 	if (idx == -1 || registered_fb[idx] == NULL)
@@ -3406,7 +3406,7 @@
 
 	blink = (ops->flags & FBCON_FLAGS_CURSOR_TIMER) ? 1 : 0;
 err:
-	release_console_sem();
+	console_unlock();
 	return snprintf(buf, PAGE_SIZE, "%d\n", blink);
 }
 
@@ -3421,7 +3421,7 @@
 	if (fbcon_has_exited)
 		return count;
 
-	acquire_console_sem();
+	console_lock();
 	idx = con2fb_map[fg_console];
 
 	if (idx == -1 || registered_fb[idx] == NULL)
@@ -3443,7 +3443,7 @@
 	}
 
 err:
-	release_console_sem();
+	console_unlock();
 	return count;
 }
 
@@ -3482,7 +3482,7 @@
 	if (num_registered_fb) {
 		int i;
 
-		acquire_console_sem();
+		console_lock();
 
 		for (i = 0; i < FB_MAX; i++) {
 			if (registered_fb[i] != NULL) {
@@ -3491,7 +3491,7 @@
 			}
 		}
 
-		release_console_sem();
+		console_unlock();
 		fbcon_takeover(0);
 	}
 }
@@ -3552,7 +3552,7 @@
 {
 	int i;
 
-	acquire_console_sem();
+	console_lock();
 	fb_register_client(&fbcon_event_notifier);
 	fbcon_device = device_create(fb_class, NULL, MKDEV(0, 0), NULL,
 				     "fbcon");
@@ -3568,7 +3568,7 @@
 	for (i = 0; i < MAX_NR_CONSOLES; i++)
 		con2fb_map[i] = -1;
 
-	release_console_sem();
+	console_unlock();
 	fbcon_start();
 	return 0;
 }
@@ -3591,12 +3591,12 @@
 
 static void __exit fb_console_exit(void)
 {
-	acquire_console_sem();
+	console_lock();
 	fb_unregister_client(&fbcon_event_notifier);
 	fbcon_deinit_device();
 	device_destroy(fb_class, MKDEV(0, 0));
 	fbcon_exit();
-	release_console_sem();
+	console_unlock();
 	unregister_con_driver(&fb_con);
 }	
 
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index c97491b..915fd74 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -202,11 +202,7 @@
 	}
 }
 
-/*
- * Called only duing init so call of alloc_bootmen is ok.
- * Marked __init_refok to silence modpost.
- */
-static void __init_refok vgacon_scrollback_startup(void)
+static void vgacon_scrollback_startup(void)
 {
 	vgacon_scrollback = kcalloc(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, GFP_NOWAIT);
 	vgacon_scrollback_init(vga_video_num_columns * 2);
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index c265aed..8d61ef9 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -1092,9 +1092,10 @@
 
 irq_freq:
 #ifdef CONFIG_CPU_FREQ
+	lcd_da8xx_cpufreq_deregister(par);
+#endif
 err_cpu_freq:
 	unregister_framebuffer(da8xx_fb_info);
-#endif
 
 err_dealloc_cmap:
 	fb_dealloc_cmap(&da8xx_fb_info->cmap);
@@ -1130,14 +1131,14 @@
 	struct fb_info *info = platform_get_drvdata(dev);
 	struct da8xx_fb_par *par = info->par;
 
-	acquire_console_sem();
+	console_lock();
 	if (par->panel_power_ctrl)
 		par->panel_power_ctrl(0);
 
 	fb_set_suspend(info, 1);
 	lcd_disable_raster();
 	clk_disable(par->lcdc_clk);
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
@@ -1146,14 +1147,14 @@
 	struct fb_info *info = platform_get_drvdata(dev);
 	struct da8xx_fb_par *par = info->par;
 
-	acquire_console_sem();
+	console_lock();
 	if (par->panel_power_ctrl)
 		par->panel_power_ctrl(1);
 
 	clk_enable(par->lcdc_clk);
 	lcd_enable_raster();
 	fb_set_suspend(info, 0);
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 4ac1201..e2bf953 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1036,11 +1036,11 @@
 			return -EFAULT;
 		if (!lock_fb_info(info))
 			return -ENODEV;
-		acquire_console_sem();
+		console_lock();
 		info->flags |= FBINFO_MISC_USEREVENT;
 		ret = fb_set_var(info, &var);
 		info->flags &= ~FBINFO_MISC_USEREVENT;
-		release_console_sem();
+		console_unlock();
 		unlock_fb_info(info);
 		if (!ret && copy_to_user(argp, &var, sizeof(var)))
 			ret = -EFAULT;
@@ -1072,9 +1072,9 @@
 			return -EFAULT;
 		if (!lock_fb_info(info))
 			return -ENODEV;
-		acquire_console_sem();
+		console_lock();
 		ret = fb_pan_display(info, &var);
-		release_console_sem();
+		console_unlock();
 		unlock_fb_info(info);
 		if (ret == 0 && copy_to_user(argp, &var, sizeof(var)))
 			return -EFAULT;
@@ -1119,11 +1119,11 @@
 	case FBIOBLANK:
 		if (!lock_fb_info(info))
 			return -ENODEV;
-		acquire_console_sem();
+		console_lock();
 		info->flags |= FBINFO_MISC_USEREVENT;
 		ret = fb_blank(info, arg);
 		info->flags &= ~FBINFO_MISC_USEREVENT;
-		release_console_sem();
+		console_unlock();
 		unlock_fb_info(info);
 		break;
 	default:
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 0a08f13..f4a3277 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -90,11 +90,11 @@
 	int err;
 
 	var->activate |= FB_ACTIVATE_FORCE;
-	acquire_console_sem();
+	console_lock();
 	fb_info->flags |= FBINFO_MISC_USEREVENT;
 	err = fb_set_var(fb_info, var);
 	fb_info->flags &= ~FBINFO_MISC_USEREVENT;
-	release_console_sem();
+	console_unlock();
 	if (err)
 		return err;
 	return 0;
@@ -175,7 +175,7 @@
 	if (i * sizeof(struct fb_videomode) != count)
 		return -EINVAL;
 
-	acquire_console_sem();
+	console_lock();
 	list_splice(&fb_info->modelist, &old_list);
 	fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
 				 &fb_info->modelist);
@@ -185,7 +185,7 @@
 	} else
 		fb_destroy_modelist(&old_list);
 
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
@@ -301,11 +301,11 @@
 	char *last = NULL;
 	int err;
 
-	acquire_console_sem();
+	console_lock();
 	fb_info->flags |= FBINFO_MISC_USEREVENT;
 	err = fb_blank(fb_info, simple_strtoul(buf, &last, 0));
 	fb_info->flags &= ~FBINFO_MISC_USEREVENT;
-	release_console_sem();
+	console_unlock();
 	if (err < 0)
 		return err;
 	return count;
@@ -364,9 +364,9 @@
 		return -EINVAL;
 	var.yoffset = simple_strtoul(last, &last, 0);
 
-	acquire_console_sem();
+	console_lock();
 	err = fb_pan_display(fb_info, &var);
-	release_console_sem();
+	console_unlock();
 
 	if (err < 0)
 		return err;
@@ -399,9 +399,9 @@
 
 	state = simple_strtoul(buf, &last, 0);
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(fb_info, (int)state);
-	release_console_sem();
+	console_unlock();
 
 	return count;
 }
diff --git a/drivers/video/geode/gxfb_core.c b/drivers/video/geode/gxfb_core.c
index 70b1d9d..b4f19db 100644
--- a/drivers/video/geode/gxfb_core.c
+++ b/drivers/video/geode/gxfb_core.c
@@ -344,10 +344,10 @@
 	struct fb_info *info = pci_get_drvdata(pdev);
 
 	if (state.event == PM_EVENT_SUSPEND) {
-		acquire_console_sem();
+		console_lock();
 		gx_powerdown(info);
 		fb_set_suspend(info, 1);
-		release_console_sem();
+		console_unlock();
 	}
 
 	/* there's no point in setting PCI states; we emulate PCI, so
@@ -361,7 +361,7 @@
 	struct fb_info *info = pci_get_drvdata(pdev);
 	int ret;
 
-	acquire_console_sem();
+	console_lock();
 	ret = gx_powerup(info);
 	if (ret) {
 		printk(KERN_ERR "gxfb:  power up failed!\n");
@@ -369,7 +369,7 @@
 	}
 
 	fb_set_suspend(info, 0);
-	release_console_sem();
+	console_unlock();
 	return 0;
 }
 #endif
diff --git a/drivers/video/geode/lxfb_core.c b/drivers/video/geode/lxfb_core.c
index 39bdbed..416851c 100644
--- a/drivers/video/geode/lxfb_core.c
+++ b/drivers/video/geode/lxfb_core.c
@@ -465,10 +465,10 @@
 	struct fb_info *info = pci_get_drvdata(pdev);
 
 	if (state.event == PM_EVENT_SUSPEND) {
-		acquire_console_sem();
+		console_lock();
 		lx_powerdown(info);
 		fb_set_suspend(info, 1);
-		release_console_sem();
+		console_unlock();
 	}
 
 	/* there's no point in setting PCI states; we emulate PCI, so
@@ -482,7 +482,7 @@
 	struct fb_info *info = pci_get_drvdata(pdev);
 	int ret;
 
-	acquire_console_sem();
+	console_lock();
 	ret = lx_powerup(info);
 	if (ret) {
 		printk(KERN_ERR "lxfb:  power up failed!\n");
@@ -490,7 +490,7 @@
 	}
 
 	fb_set_suspend(info, 0);
-	release_console_sem();
+	console_unlock();
 	return 0;
 }
 #else
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 5743ea2..318f6fb 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -1574,7 +1574,7 @@
 		return 0;
 	}
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(info, 1);
 
 	if (info->fbops->fb_sync)
@@ -1587,7 +1587,7 @@
 	pci_save_state(dev);
 	pci_disable_device(dev);
 	pci_set_power_state(dev, pci_choose_state(dev, mesg));
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
@@ -1605,7 +1605,7 @@
 		return 0;
 	}
 
-	acquire_console_sem();
+	console_lock();
 	pci_set_power_state(dev, PCI_D0);
 	pci_restore_state(dev);
 
@@ -1621,7 +1621,7 @@
 	fb_set_suspend (info, 0);
 	info->fbops->fb_blank(VESA_NO_BLANKING, info);
 fail:
-	release_console_sem();
+	console_unlock();
 	return 0;
 }
 /***********************************************************************
diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
index 670ecaa..de36693 100644
--- a/drivers/video/jz4740_fb.c
+++ b/drivers/video/jz4740_fb.c
@@ -778,9 +778,9 @@
 {
 	struct jzfb *jzfb = dev_get_drvdata(dev);
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(jzfb->fb, 1);
-	release_console_sem();
+	console_unlock();
 
 	mutex_lock(&jzfb->lock);
 	if (jzfb->is_enabled)
@@ -800,9 +800,9 @@
 		jzfb_enable(jzfb);
 	mutex_unlock(&jzfb->lock);
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(jzfb->fb, 0);
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index cb01391..7e3a490 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -1177,9 +1177,9 @@
 	struct mx3fb_data *mx3fb = platform_get_drvdata(pdev);
 	struct mx3fb_info *mx3_fbi = mx3fb->fbi->par;
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(mx3fb->fbi, 1);
-	release_console_sem();
+	console_unlock();
 
 	if (mx3_fbi->blank == FB_BLANK_UNBLANK) {
 		sdc_disable_channel(mx3_fbi);
@@ -1202,9 +1202,9 @@
 		sdc_set_brightness(mx3fb, mx3fb->backlight_level);
 	}
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(mx3fb->fbi, 0);
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 62498bd..f838d9e 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -696,6 +696,8 @@
 	nuc900fb_stop_lcd(fbinfo);
 	msleep(1);
 
+	unregister_framebuffer(fbinfo);
+	nuc900fb_cpufreq_deregister(fbi);
 	nuc900fb_unmap_video_memory(fbinfo);
 
 	iounmap(fbi->io);
@@ -723,7 +725,7 @@
 	struct fb_info	   *fbinfo = platform_get_drvdata(dev);
 	struct nuc900fb_info *info = fbinfo->par;
 
-	nuc900fb_stop_lcd();
+	nuc900fb_stop_lcd(fbinfo);
 	msleep(1);
 	clk_disable(info->clk);
 	return 0;
@@ -740,7 +742,7 @@
 	msleep(1);
 
 	nuc900fb_init_registers(fbinfo);
-	nuc900fb_activate_var(bfinfo);
+	nuc900fb_activate_var(fbinfo);
 
 	return 0;
 }
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index efe10ff..081dc47 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -1057,7 +1057,7 @@
 
 	if (mesg.event == PM_EVENT_PRETHAW)
 		mesg.event = PM_EVENT_FREEZE;
-	acquire_console_sem();
+	console_lock();
 	par->pm_state = mesg.event;
 
 	if (mesg.event & PM_EVENT_SLEEP) {
@@ -1070,7 +1070,7 @@
 	}
 	dev->dev.power.power_state = mesg;
 
-	release_console_sem();
+	console_unlock();
 	return 0;
 }
 
@@ -1079,7 +1079,7 @@
 	struct fb_info *info = pci_get_drvdata(dev);
 	struct nvidia_par *par = info->par;
 
-	acquire_console_sem();
+	console_lock();
 	pci_set_power_state(dev, PCI_D0);
 
 	if (par->pm_state != PM_EVENT_FREEZE) {
@@ -1097,7 +1097,7 @@
 	nvidiafb_blank(FB_BLANK_UNBLANK, info);
 
 fail:
-	release_console_sem();
+	console_unlock();
 	return 0;
 }
 #else
diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c
index 9c0144e..65560a1 100644
--- a/drivers/video/ps3fb.c
+++ b/drivers/video/ps3fb.c
@@ -513,9 +513,9 @@
 	if (atomic_dec_and_test(&ps3fb.f_count)) {
 		if (atomic_read(&ps3fb.ext_flip)) {
 			atomic_set(&ps3fb.ext_flip, 0);
-			if (!try_acquire_console_sem()) {
+			if (console_trylock()) {
 				ps3fb_sync(info, 0);	/* single buffer */
-				release_console_sem();
+				console_unlock();
 			}
 		}
 	}
@@ -830,14 +830,14 @@
 			if (vmode) {
 				var = info->var;
 				fb_videomode_to_var(&var, vmode);
-				acquire_console_sem();
+				console_lock();
 				info->flags |= FBINFO_MISC_USEREVENT;
 				/* Force, in case only special bits changed */
 				var.activate |= FB_ACTIVATE_FORCE;
 				par->new_mode_id = val;
 				retval = fb_set_var(info, &var);
 				info->flags &= ~FBINFO_MISC_USEREVENT;
-				release_console_sem();
+				console_unlock();
 			}
 			break;
 		}
@@ -881,9 +881,9 @@
 			break;
 
 		dev_dbg(info->device, "PS3FB_IOCTL_FSEL:%d\n", val);
-		acquire_console_sem();
+		console_lock();
 		retval = ps3fb_sync(info, val);
-		release_console_sem();
+		console_unlock();
 		break;
 
 	default:
@@ -903,9 +903,9 @@
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (ps3fb.is_kicked) {
 			ps3fb.is_kicked = 0;
-			acquire_console_sem();
+			console_lock();
 			ps3fb_sync(info, 0);	/* single buffer */
-			release_console_sem();
+			console_unlock();
 		}
 		schedule();
 	}
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index cea6403..35f61dd 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -701,16 +701,12 @@
 	 */
 	pxa168fb_init_mode(info, mi);
 
-	ret = pxa168fb_check_var(&info->var, info);
-	if (ret)
-		goto failed_free_fbmem;
-
 	/*
 	 * Fill in sane defaults.
 	 */
 	ret = pxa168fb_check_var(&info->var, info);
 	if (ret)
-		goto failed;
+		goto failed_free_fbmem;
 
 	/*
 	 * enable controller clock
diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/pxa3xx-gcu.c
index b81168d..cf4beb9 100644
--- a/drivers/video/pxa3xx-gcu.c
+++ b/drivers/video/pxa3xx-gcu.c
@@ -1,5 +1,5 @@
 /*
- *  pxa3xx-gc.c - Linux kernel module for PXA3xx graphics controllers
+ *  pxa3xx-gcu.c - Linux kernel module for PXA3xx graphics controllers
  *
  *  This driver needs a DirectFB counterpart in user space, communication
  *  is handled via mmap()ed memory areas and an ioctl.
@@ -421,7 +421,7 @@
 		buffer->next = priv->free;
 		priv->free = buffer;
 		spin_unlock_irqrestore(&priv->spinlock, flags);
-		return ret;
+		return -EFAULT;
 	}
 
 	buffer->length = words;
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index dce8c97..75738a9 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -22,7 +22,7 @@
 #include <linux/svga.h>
 #include <linux/init.h>
 #include <linux/pci.h>
-#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */
+#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
 #include <video/vga.h>
 
 #ifdef CONFIG_MTRR
@@ -1113,12 +1113,12 @@
 
 	dev_info(info->device, "suspend\n");
 
-	acquire_console_sem();
+	console_lock();
 	mutex_lock(&(par->open_lock));
 
 	if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
 		mutex_unlock(&(par->open_lock));
-		release_console_sem();
+		console_unlock();
 		return 0;
 	}
 
@@ -1129,7 +1129,7 @@
 	pci_set_power_state(dev, pci_choose_state(dev, state));
 
 	mutex_unlock(&(par->open_lock));
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
@@ -1145,12 +1145,12 @@
 
 	dev_info(info->device, "resume\n");
 
-	acquire_console_sem();
+	console_lock();
 	mutex_lock(&(par->open_lock));
 
 	if (par->ref_count == 0) {
 		mutex_unlock(&(par->open_lock));
-		release_console_sem();
+		console_unlock();
 		return 0;
 	}
 
@@ -1159,7 +1159,7 @@
 	err = pci_enable_device(dev);
 	if (err) {
 		mutex_unlock(&(par->open_lock));
-		release_console_sem();
+		console_unlock();
 		dev_err(info->device, "error %d enabling device for resume\n", err);
 		return err;
 	}
@@ -1169,7 +1169,7 @@
 	fb_set_suspend(info, 0);
 
 	mutex_unlock(&(par->open_lock));
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 842d157..487911e 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -2373,7 +2373,7 @@
 	if (mesg.event == PM_EVENT_FREEZE)
 		return 0;
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(info, 1);
 
 	if (info->fbops->fb_sync)
@@ -2385,7 +2385,7 @@
 	pci_save_state(dev);
 	pci_disable_device(dev);
 	pci_set_power_state(dev, pci_choose_state(dev, mesg));
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
@@ -2409,7 +2409,7 @@
 		return 0;
 	}
 
-	acquire_console_sem();
+	console_lock();
 
 	pci_set_power_state(dev, PCI_D0);
 	pci_restore_state(dev);
@@ -2423,7 +2423,7 @@
 	savagefb_set_par(info);
 	fb_set_suspend(info, 0);
 	savagefb_blank(FB_BLANK_UNBLANK, info);
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index 74d9f54..2b9e56a 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -1151,7 +1151,7 @@
 
 		ch = info->par;
 
-		acquire_console_sem();
+		console_lock();
 
 		/* HDMI plug in */
 		if (!sh_hdmi_must_reconfigure(hdmi) &&
@@ -1171,7 +1171,7 @@
 			fb_set_suspend(info, 0);
 		}
 
-		release_console_sem();
+		console_unlock();
 	} else {
 		ret = 0;
 		if (!hdmi->info)
@@ -1181,12 +1181,12 @@
 		fb_destroy_modedb(hdmi->monspec.modedb);
 		hdmi->monspec.modedb = NULL;
 
-		acquire_console_sem();
+		console_lock();
 
 		/* HDMI disconnect */
 		fb_set_suspend(hdmi->info, 1);
 
-		release_console_sem();
+		console_unlock();
 		pm_runtime_put(hdmi->dev);
 	}
 
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index bd4840a..bf12e53 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -912,9 +912,9 @@
 
 	/* Nothing to reconfigure, when called from fbcon */
 	if (user) {
-		acquire_console_sem();
+		console_lock();
 		sh_mobile_fb_reconfig(info);
-		release_console_sem();
+		console_unlock();
 	}
 
 	mutex_unlock(&ch->open_lock);
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index b7dc180..bcb44a5 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -2010,9 +2010,9 @@
 
 	/* tell console/fb driver we are suspending */
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(fbi, 1);
-	release_console_sem();
+	console_unlock();
 
 	/* backup copies in case chip is powered down over suspend */
 
@@ -2069,9 +2069,9 @@
 		memcpy_toio(par->cursor.k_addr, par->store_cursor,
 			    par->cursor.size);
 
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(fbi, 0);
-	release_console_sem();
+	console_unlock();
 
 	vfree(par->store_fb);
 	vfree(par->store_cursor);
diff --git a/drivers/video/tmiofb.c b/drivers/video/tmiofb.c
index 6913fe1..dfef88c 100644
--- a/drivers/video/tmiofb.c
+++ b/drivers/video/tmiofb.c
@@ -25,7 +25,7 @@
 #include <linux/fb.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
-/* Why should fb driver call console functions? because acquire_console_sem() */
+/* Why should fb driver call console functions? because console_lock() */
 #include <linux/console.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/tmio.h>
@@ -944,7 +944,7 @@
 	struct mfd_cell *cell = dev->dev.platform_data;
 	int retval = 0;
 
-	acquire_console_sem();
+	console_lock();
 
 	fb_set_suspend(info, 1);
 
@@ -965,7 +965,7 @@
 	if (cell->suspend)
 		retval = cell->suspend(dev);
 
-	release_console_sem();
+	console_unlock();
 
 	return retval;
 }
@@ -976,7 +976,7 @@
 	struct mfd_cell *cell = dev->dev.platform_data;
 	int retval = 0;
 
-	acquire_console_sem();
+	console_lock();
 
 	if (cell->resume) {
 		retval = cell->resume(dev);
@@ -992,7 +992,7 @@
 
 	fb_set_suspend(info, 0);
 out:
-	release_console_sem();
+	console_unlock();
 	return retval;
 }
 #else
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 289edd5..4e66349 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -1674,17 +1674,17 @@
 #ifdef CONFIG_PM
 static int viafb_suspend(void *unused)
 {
-	acquire_console_sem();
+	console_lock();
 	fb_set_suspend(viafbinfo, 1);
 	viafb_sync(viafbinfo);
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
 
 static int viafb_resume(void *unused)
 {
-	acquire_console_sem();
+	console_lock();
 	if (viaparinfo->shared->vdev->engine_mmio)
 		viafb_reset_engine(viaparinfo);
 	viafb_set_par(viafbinfo);
@@ -1692,7 +1692,7 @@
 		viafb_set_par(viafbinfo1);
 	fb_set_suspend(viafbinfo, 0);
 
-	release_console_sem();
+	console_unlock();
 	return 0;
 }
 
diff --git a/drivers/video/vt8623fb.c b/drivers/video/vt8623fb.c
index 85d76ec..a2965ab 100644
--- a/drivers/video/vt8623fb.c
+++ b/drivers/video/vt8623fb.c
@@ -23,7 +23,7 @@
 #include <linux/svga.h>
 #include <linux/init.h>
 #include <linux/pci.h>
-#include <linux/console.h> /* Why should fb driver call console functions? because acquire_console_sem() */
+#include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */
 #include <video/vga.h>
 
 #ifdef CONFIG_MTRR
@@ -819,12 +819,12 @@
 
 	dev_info(info->device, "suspend\n");
 
-	acquire_console_sem();
+	console_lock();
 	mutex_lock(&(par->open_lock));
 
 	if ((state.event == PM_EVENT_FREEZE) || (par->ref_count == 0)) {
 		mutex_unlock(&(par->open_lock));
-		release_console_sem();
+		console_unlock();
 		return 0;
 	}
 
@@ -835,7 +835,7 @@
 	pci_set_power_state(dev, pci_choose_state(dev, state));
 
 	mutex_unlock(&(par->open_lock));
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
@@ -850,7 +850,7 @@
 
 	dev_info(info->device, "resume\n");
 
-	acquire_console_sem();
+	console_lock();
 	mutex_lock(&(par->open_lock));
 
 	if (par->ref_count == 0)
@@ -869,7 +869,7 @@
 
 fail:
 	mutex_unlock(&(par->open_lock));
-	release_console_sem();
+	console_unlock();
 
 	return 0;
 }
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 3e6934d..a20218c 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -491,12 +491,12 @@
 	if (console_set_on_cmdline)
 		return;
 
-	acquire_console_sem();
+	console_lock();
 	for_each_console(c) {
 		if (!strcmp(c->name, "tty") && c->index == 0)
 			break;
 	}
-	release_console_sem();
+	console_unlock();
 	if (c) {
 		unregister_console(c);
 		c->flags |= CON_CONSDEV;
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index ef8d9d5..4fb5b2b 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -96,11 +96,6 @@
 
 MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
 
-/* A PCI device has it's own struct device and so does a virtio device so
- * we create a place for the virtio devices to show up in sysfs.  I think it
- * would make more sense for virtio to not insist on having it's own device. */
-static struct device *virtio_pci_root;
-
 /* Convert a generic virtio device to our structure */
 static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
 {
@@ -629,7 +624,7 @@
 	if (vp_dev == NULL)
 		return -ENOMEM;
 
-	vp_dev->vdev.dev.parent = virtio_pci_root;
+	vp_dev->vdev.dev.parent = &pci_dev->dev;
 	vp_dev->vdev.dev.release = virtio_pci_release_dev;
 	vp_dev->vdev.config = &virtio_pci_config_ops;
 	vp_dev->pci_dev = pci_dev;
@@ -717,17 +712,7 @@
 
 static int __init virtio_pci_init(void)
 {
-	int err;
-
-	virtio_pci_root = root_device_register("virtio-pci");
-	if (IS_ERR(virtio_pci_root))
-		return PTR_ERR(virtio_pci_root);
-
-	err = pci_register_driver(&virtio_pci_driver);
-	if (err)
-		root_device_unregister(virtio_pci_root);
-
-	return err;
+	return pci_register_driver(&virtio_pci_driver);
 }
 
 module_init(virtio_pci_init);
@@ -735,7 +720,6 @@
 static void __exit virtio_pci_exit(void)
 {
 	pci_unregister_driver(&virtio_pci_driver);
-	root_device_unregister(virtio_pci_root);
 }
 
 module_exit(virtio_pci_exit);
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 3a7e9ff..38e96ab 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -593,19 +593,17 @@
 
 	/* get interface & functional clock objects */
 	hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
-	hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
+	if (IS_ERR(hdq_data->hdq_ick)) {
+		dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n");
+		ret = PTR_ERR(hdq_data->hdq_ick);
+		goto err_ick;
+	}
 
-	if (IS_ERR(hdq_data->hdq_ick) || IS_ERR(hdq_data->hdq_fck)) {
-		dev_dbg(&pdev->dev, "Can't get HDQ clock objects\n");
-		if (IS_ERR(hdq_data->hdq_ick)) {
-			ret = PTR_ERR(hdq_data->hdq_ick);
-			goto err_clk;
-		}
-		if (IS_ERR(hdq_data->hdq_fck)) {
-			ret = PTR_ERR(hdq_data->hdq_fck);
-			clk_put(hdq_data->hdq_ick);
-			goto err_clk;
-		}
+	hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
+	if (IS_ERR(hdq_data->hdq_fck)) {
+		dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n");
+		ret = PTR_ERR(hdq_data->hdq_fck);
+		goto err_fck;
 	}
 
 	hdq_data->hdq_usecount = 0;
@@ -665,10 +663,12 @@
 	clk_disable(hdq_data->hdq_ick);
 
 err_intfclk:
-	clk_put(hdq_data->hdq_ick);
 	clk_put(hdq_data->hdq_fck);
 
-err_clk:
+err_fck:
+	clk_put(hdq_data->hdq_ick);
+
+err_ick:
 	iounmap(hdq_data->hdq_base);
 
 err_ioremap:
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 2e2400e..31649b7 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -862,12 +862,12 @@
 
 # M68K Architecture
 
-config M548x_WATCHDOG
-	tristate "MCF548x watchdog support"
+config M54xx_WATCHDOG
+	tristate "MCF54xx watchdog support"
 	depends on M548x
 	help
 	  To compile this driver as a module, choose M here: the
-	  module will be called m548x_wdt.
+	  module will be called m54xx_wdt.
 
 # MIPS Architecture
 
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index dd77665..20e44c4 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -106,7 +106,7 @@
 # M32R Architecture
 
 # M68K Architecture
-obj-$(CONFIG_M548x_WATCHDOG) += m548x_wdt.o
+obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o
 
 # MIPS Architecture
 obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o
diff --git a/drivers/watchdog/m548x_wdt.c b/drivers/watchdog/m54xx_wdt.c
similarity index 80%
rename from drivers/watchdog/m548x_wdt.c
rename to drivers/watchdog/m54xx_wdt.c
index cabbcfe..4d43286 100644
--- a/drivers/watchdog/m548x_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -1,7 +1,7 @@
 /*
- * drivers/watchdog/m548x_wdt.c
+ * drivers/watchdog/m54xx_wdt.c
  *
- * Watchdog driver for ColdFire MCF548x processors
+ * Watchdog driver for ColdFire MCF547x & MCF548x processors
  * Copyright 2010 (c) Philippe De Muyter <phdm@macqel.be>
  *
  * Adapted from the IXP4xx watchdog driver, which carries these notices:
@@ -29,8 +29,8 @@
 #include <linux/uaccess.h>
 
 #include <asm/coldfire.h>
-#include <asm/m548xsim.h>
-#include <asm/m548xgpt.h>
+#include <asm/m54xxsim.h>
+#include <asm/m54xxgpt.h>
 
 static int nowayout = WATCHDOG_NOWAYOUT;
 static unsigned int heartbeat = 30;	/* (secs) Default is 0.5 minute */
@@ -76,7 +76,7 @@
 	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
 }
 
-static int m548x_wdt_open(struct inode *inode, struct file *file)
+static int m54xx_wdt_open(struct inode *inode, struct file *file)
 {
 	if (test_and_set_bit(WDT_IN_USE, &wdt_status))
 		return -EBUSY;
@@ -86,7 +86,7 @@
 	return nonseekable_open(inode, file);
 }
 
-static ssize_t m548x_wdt_write(struct file *file, const char *data,
+static ssize_t m54xx_wdt_write(struct file *file, const char *data,
 						size_t len, loff_t *ppos)
 {
 	if (len) {
@@ -112,10 +112,10 @@
 static const struct watchdog_info ident = {
 	.options	= WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
 				WDIOF_KEEPALIVEPING,
-	.identity	= "Coldfire M548x Watchdog",
+	.identity	= "Coldfire M54xx Watchdog",
 };
 
-static long m548x_wdt_ioctl(struct file *file, unsigned int cmd,
+static long m54xx_wdt_ioctl(struct file *file, unsigned int cmd,
 							 unsigned long arg)
 {
 	int ret = -ENOTTY;
@@ -161,7 +161,7 @@
 	return ret;
 }
 
-static int m548x_wdt_release(struct inode *inode, struct file *file)
+static int m54xx_wdt_release(struct inode *inode, struct file *file)
 {
 	if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
 		wdt_disable();
@@ -177,45 +177,45 @@
 }
 
 
-static const struct file_operations m548x_wdt_fops = {
+static const struct file_operations m54xx_wdt_fops = {
 	.owner		= THIS_MODULE,
 	.llseek		= no_llseek,
-	.write		= m548x_wdt_write,
-	.unlocked_ioctl	= m548x_wdt_ioctl,
-	.open		= m548x_wdt_open,
-	.release	= m548x_wdt_release,
+	.write		= m54xx_wdt_write,
+	.unlocked_ioctl	= m54xx_wdt_ioctl,
+	.open		= m54xx_wdt_open,
+	.release	= m54xx_wdt_release,
 };
 
-static struct miscdevice m548x_wdt_miscdev = {
+static struct miscdevice m54xx_wdt_miscdev = {
 	.minor		= WATCHDOG_MINOR,
 	.name		= "watchdog",
-	.fops		= &m548x_wdt_fops,
+	.fops		= &m54xx_wdt_fops,
 };
 
-static int __init m548x_wdt_init(void)
+static int __init m54xx_wdt_init(void)
 {
 	if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4,
-						"Coldfire M548x Watchdog")) {
+						"Coldfire M54xx Watchdog")) {
 		printk(KERN_WARNING
-				"Coldfire M548x Watchdog : I/O region busy\n");
+				"Coldfire M54xx Watchdog : I/O region busy\n");
 		return -EBUSY;
 	}
 	printk(KERN_INFO "ColdFire watchdog driver is loaded.\n");
 
-	return misc_register(&m548x_wdt_miscdev);
+	return misc_register(&m54xx_wdt_miscdev);
 }
 
-static void __exit m548x_wdt_exit(void)
+static void __exit m54xx_wdt_exit(void)
 {
-	misc_deregister(&m548x_wdt_miscdev);
+	misc_deregister(&m54xx_wdt_miscdev);
 	release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4);
 }
 
-module_init(m548x_wdt_init);
-module_exit(m548x_wdt_exit);
+module_init(m54xx_wdt_init);
+module_exit(m54xx_wdt_exit);
 
 MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>");
-MODULE_DESCRIPTION("Coldfire M548x Watchdog");
+MODULE_DESCRIPTION("Coldfire M54xx Watchdog");
 
 module_param(heartbeat, int, 0);
 MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)");
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index db8c4c4..2417727 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -37,11 +37,19 @@
 #ifdef CONFIG_PM_SLEEP
 static int xen_hvm_suspend(void *data)
 {
+	int err;
 	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
 	int *cancelled = data;
 
 	BUG_ON(!irqs_disabled());
 
+	err = sysdev_suspend(PMSG_SUSPEND);
+	if (err) {
+		printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n",
+		       err);
+		return err;
+	}
+
 	*cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
 
 	xen_hvm_post_suspend(*cancelled);
@@ -53,6 +61,8 @@
 		xen_timer_resume();
 	}
 
+	sysdev_resume();
+
 	return 0;
 }
 
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
index 1c12360..bbd000f 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenfs/xenbus.c
@@ -122,6 +122,7 @@
 	int ret;
 
 	mutex_lock(&u->reply_mutex);
+again:
 	while (list_empty(&u->read_buffers)) {
 		mutex_unlock(&u->reply_mutex);
 		if (filp->f_flags & O_NONBLOCK)
@@ -144,7 +145,7 @@
 		i += sz - ret;
 		rb->cons += sz - ret;
 
-		if (ret != sz) {
+		if (ret != 0) {
 			if (i == 0)
 				i = -EFAULT;
 			goto out;
@@ -160,6 +161,8 @@
 					struct read_buffer, list);
 		}
 	}
+	if (i == 0)
+		goto again;
 
 out:
 	mutex_unlock(&u->reply_mutex);
@@ -407,6 +410,7 @@
 
 		mutex_lock(&u->reply_mutex);
 		rc = queue_reply(&u->read_buffers, &reply, sizeof(reply));
+		wake_up(&u->read_waitq);
 		mutex_unlock(&u->reply_mutex);
 	}
 
@@ -455,7 +459,7 @@
 
 	ret = copy_from_user(u->u.buffer + u->len, ubuf, len);
 
-	if (ret == len) {
+	if (ret != 0) {
 		rc = -EFAULT;
 		goto out;
 	}
@@ -488,21 +492,6 @@
 	msg_type = u->u.msg.type;
 
 	switch (msg_type) {
-	case XS_TRANSACTION_START:
-	case XS_TRANSACTION_END:
-	case XS_DIRECTORY:
-	case XS_READ:
-	case XS_GET_PERMS:
-	case XS_RELEASE:
-	case XS_GET_DOMAIN_PATH:
-	case XS_WRITE:
-	case XS_MKDIR:
-	case XS_RM:
-	case XS_SET_PERMS:
-		/* Send out a transaction */
-		ret = xenbus_write_transaction(msg_type, u);
-		break;
-
 	case XS_WATCH:
 	case XS_UNWATCH:
 		/* (Un)Ask for some path to be watched for changes */
@@ -510,7 +499,8 @@
 		break;
 
 	default:
-		ret = -EINVAL;
+		/* Send out a transaction */
+		ret = xenbus_write_transaction(msg_type, u);
 		break;
 	}
 	if (ret != 0)
@@ -555,6 +545,7 @@
 	struct xenbus_file_priv *u = filp->private_data;
 	struct xenbus_transaction_holder *trans, *tmp;
 	struct watch_adapter *watch, *tmp_watch;
+	struct read_buffer *rb, *tmp_rb;
 
 	/*
 	 * No need for locking here because there are no other users,
@@ -573,6 +564,10 @@
 		free_watch_adapter(watch);
 	}
 
+	list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
+		list_del(&rb->list);
+		kfree(rb);
+	}
 	kfree(u);
 
 	return 0;
diff --git a/fs/Kconfig b/fs/Kconfig
index 9a7921a..3db9caa 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -50,7 +50,7 @@
 	tristate
 
 config FILE_LOCKING
-	bool "Enable POSIX file locking API" if EMBEDDED
+	bool "Enable POSIX file locking API" if EXPERT
 	default y
 	help
 	  This option enables standard file locking support, required
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 333a7bb..4fb8a34 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1215,12 +1215,6 @@
 
 	res = __blkdev_get(bdev, mode, 0);
 
-	/* __blkdev_get() may alter read only status, check it afterwards */
-	if (!res && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
-		__blkdev_put(bdev, mode, 0);
-		res = -EACCES;
-	}
-
 	if (whole) {
 		/* finish claiming */
 		mutex_lock(&bdev->bd_mutex);
@@ -1298,6 +1292,11 @@
 	if (err)
 		return ERR_PTR(err);
 
+	if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
+		blkdev_put(bdev, mode);
+		return ERR_PTR(-EACCES);
+	}
+
 	return bdev;
 }
 EXPORT_SYMBOL(blkdev_get_by_path);
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 15b5ca2..9c94934 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -37,6 +37,9 @@
 	char *value = NULL;
 	struct posix_acl *acl;
 
+	if (!IS_POSIXACL(inode))
+		return NULL;
+
 	acl = get_cached_acl(inode, type);
 	if (acl != ACL_NOT_CACHED)
 		return acl;
@@ -84,6 +87,9 @@
 	struct posix_acl *acl;
 	int ret = 0;
 
+	if (!IS_POSIXACL(dentry->d_inode))
+		return -EOPNOTSUPP;
+
 	acl = btrfs_get_acl(dentry->d_inode, type);
 
 	if (IS_ERR(acl))
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index f745287..4d2110e 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -562,7 +562,7 @@
 	u64 em_len;
 	u64 em_start;
 	struct extent_map *em;
-	int ret;
+	int ret = -ENOMEM;
 	u32 *sums;
 
 	tree = &BTRFS_I(inode)->io_tree;
@@ -577,6 +577,9 @@
 
 	compressed_len = em->block_len;
 	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+	if (!cb)
+		goto out;
+
 	atomic_set(&cb->pending_bios, 0);
 	cb->errors = 0;
 	cb->inode = inode;
@@ -597,13 +600,18 @@
 
 	nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
 				 PAGE_CACHE_SIZE;
-	cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages,
+	cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
 				       GFP_NOFS);
+	if (!cb->compressed_pages)
+		goto fail1;
+
 	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
 
 	for (page_index = 0; page_index < nr_pages; page_index++) {
 		cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
 							      __GFP_HIGHMEM);
+		if (!cb->compressed_pages[page_index])
+			goto fail2;
 	}
 	cb->nr_pages = nr_pages;
 
@@ -614,6 +622,8 @@
 	cb->len = uncompressed_len;
 
 	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
+	if (!comp_bio)
+		goto fail2;
 	comp_bio->bi_private = cb;
 	comp_bio->bi_end_io = end_compressed_bio_read;
 	atomic_inc(&cb->pending_bios);
@@ -681,6 +691,17 @@
 
 	bio_put(comp_bio);
 	return 0;
+
+fail2:
+	for (page_index = 0; page_index < nr_pages; page_index++)
+		free_page((unsigned long)cb->compressed_pages[page_index]);
+
+	kfree(cb->compressed_pages);
+fail1:
+	kfree(cb);
+out:
+	free_extent_map(em);
+	return ret;
 }
 
 static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
@@ -900,7 +921,7 @@
 	return ret;
 }
 
-void __exit btrfs_exit_compress(void)
+void btrfs_exit_compress(void)
 {
 	free_workspaces();
 }
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index b531c36..e1aa8d6 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -359,10 +359,14 @@
 
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
 
-	if (page->private == EXTENT_PAGE_PRIVATE)
+	if (page->private == EXTENT_PAGE_PRIVATE) {
+		WARN_ON(1);
 		goto out;
-	if (!page->private)
+	}
+	if (!page->private) {
+		WARN_ON(1);
 		goto out;
+	}
 	len = page->private >> 2;
 	WARN_ON(len == 0);
 
@@ -1550,6 +1554,7 @@
 		spin_unlock(&root->fs_info->new_trans_lock);
 
 		trans = btrfs_join_transaction(root, 1);
+		BUG_ON(IS_ERR(trans));
 		if (transid == trans->transid) {
 			ret = btrfs_commit_transaction(trans, root);
 			BUG_ON(ret);
@@ -2453,10 +2458,14 @@
 	up_write(&root->fs_info->cleanup_work_sem);
 
 	trans = btrfs_join_transaction(root, 1);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
 	ret = btrfs_commit_transaction(trans, root);
 	BUG_ON(ret);
 	/* run commit again to drop the original snapshot */
 	trans = btrfs_join_transaction(root, 1);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
 	btrfs_commit_transaction(trans, root);
 	ret = btrfs_write_and_wait_transaction(NULL, root);
 	BUG_ON(ret);
@@ -2554,6 +2563,8 @@
 	kfree(fs_info->chunk_root);
 	kfree(fs_info->dev_root);
 	kfree(fs_info->csum_root);
+	kfree(fs_info);
+
 	return 0;
 }
 
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 9786963..ff27d7a 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -171,6 +171,8 @@
 	int ret;
 
 	path = btrfs_alloc_path();
+	if (!path)
+		return ERR_PTR(-ENOMEM);
 
 	if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
 		key.objectid = root->root_key.objectid;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b552693..f3c96fc 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -320,11 +320,6 @@
 	if (!path)
 		return -ENOMEM;
 
-	exclude_super_stripes(extent_root, block_group);
-	spin_lock(&block_group->space_info->lock);
-	block_group->space_info->bytes_readonly += block_group->bytes_super;
-	spin_unlock(&block_group->space_info->lock);
-
 	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
 
 	/*
@@ -467,8 +462,10 @@
 			cache->cached = BTRFS_CACHE_NO;
 		}
 		spin_unlock(&cache->lock);
-		if (ret == 1)
+		if (ret == 1) {
+			free_excluded_extents(fs_info->extent_root, cache);
 			return 0;
+		}
 	}
 
 	if (load_cache_only)
@@ -3344,8 +3341,10 @@
 	u64 reserved;
 	u64 max_reclaim;
 	u64 reclaimed = 0;
+	long time_left;
 	int pause = 1;
 	int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
+	int loops = 0;
 
 	block_rsv = &root->fs_info->delalloc_block_rsv;
 	space_info = block_rsv->space_info;
@@ -3358,7 +3357,7 @@
 
 	max_reclaim = min(reserved, to_reclaim);
 
-	while (1) {
+	while (loops < 1024) {
 		/* have the flusher threads jump in and do some IO */
 		smp_mb();
 		nr_pages = min_t(unsigned long, nr_pages,
@@ -3366,8 +3365,12 @@
 		writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
 
 		spin_lock(&space_info->lock);
-		if (reserved > space_info->bytes_reserved)
+		if (reserved > space_info->bytes_reserved) {
+			loops = 0;
 			reclaimed += reserved - space_info->bytes_reserved;
+		} else {
+			loops++;
+		}
 		reserved = space_info->bytes_reserved;
 		spin_unlock(&space_info->lock);
 
@@ -3378,7 +3381,12 @@
 			return -EAGAIN;
 
 		__set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(pause);
+		time_left = schedule_timeout(pause);
+
+		/* We were interrupted, exit */
+		if (time_left)
+			break;
+
 		pause <<= 1;
 		if (pause > HZ / 10)
 			pause = HZ / 10;
@@ -3588,8 +3596,20 @@
 
 	if (num_bytes > 0) {
 		if (dest) {
-			block_rsv_add_bytes(dest, num_bytes, 0);
-		} else {
+			spin_lock(&dest->lock);
+			if (!dest->full) {
+				u64 bytes_to_add;
+
+				bytes_to_add = dest->size - dest->reserved;
+				bytes_to_add = min(num_bytes, bytes_to_add);
+				dest->reserved += bytes_to_add;
+				if (dest->reserved >= dest->size)
+					dest->full = 1;
+				num_bytes -= bytes_to_add;
+			}
+			spin_unlock(&dest->lock);
+		}
+		if (num_bytes) {
 			spin_lock(&space_info->lock);
 			space_info->bytes_reserved -= num_bytes;
 			spin_unlock(&space_info->lock);
@@ -4012,6 +4032,7 @@
 
 	num_bytes = ALIGN(num_bytes, root->sectorsize);
 	atomic_dec(&BTRFS_I(inode)->outstanding_extents);
+	WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
 
 	spin_lock(&BTRFS_I(inode)->accounting_lock);
 	nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
@@ -5633,6 +5654,7 @@
 	      struct btrfs_root *root, u32 blocksize)
 {
 	struct btrfs_block_rsv *block_rsv;
+	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
 	int ret;
 
 	block_rsv = get_block_rsv(trans, root);
@@ -5640,14 +5662,39 @@
 	if (block_rsv->size == 0) {
 		ret = reserve_metadata_bytes(trans, root, block_rsv,
 					     blocksize, 0);
-		if (ret)
+		/*
+		 * If we couldn't reserve metadata bytes try and use some from
+		 * the global reserve.
+		 */
+		if (ret && block_rsv != global_rsv) {
+			ret = block_rsv_use_bytes(global_rsv, blocksize);
+			if (!ret)
+				return global_rsv;
 			return ERR_PTR(ret);
+		} else if (ret) {
+			return ERR_PTR(ret);
+		}
 		return block_rsv;
 	}
 
 	ret = block_rsv_use_bytes(block_rsv, blocksize);
 	if (!ret)
 		return block_rsv;
+	if (ret) {
+		WARN_ON(1);
+		ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
+					     0);
+		if (!ret) {
+			spin_lock(&block_rsv->lock);
+			block_rsv->size += blocksize;
+			spin_unlock(&block_rsv->lock);
+			return block_rsv;
+		} else if (ret && block_rsv != global_rsv) {
+			ret = block_rsv_use_bytes(global_rsv, blocksize);
+			if (!ret)
+				return global_rsv;
+		}
+	}
 
 	return ERR_PTR(-ENOSPC);
 }
@@ -6221,6 +6268,8 @@
 	BUG_ON(!wc);
 
 	trans = btrfs_start_transaction(tree_root, 0);
+	BUG_ON(IS_ERR(trans));
+
 	if (block_rsv)
 		trans->block_rsv = block_rsv;
 
@@ -6318,6 +6367,7 @@
 
 			btrfs_end_transaction_throttle(trans, tree_root);
 			trans = btrfs_start_transaction(tree_root, 0);
+			BUG_ON(IS_ERR(trans));
 			if (block_rsv)
 				trans->block_rsv = block_rsv;
 		}
@@ -6446,6 +6496,8 @@
 	int ret = 0;
 
 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
+	if (!ra)
+		return -ENOMEM;
 
 	mutex_lock(&inode->i_mutex);
 	first_index = start >> PAGE_CACHE_SHIFT;
@@ -6531,7 +6583,7 @@
 	u64 end = start + extent_key->offset - 1;
 
 	em = alloc_extent_map(GFP_NOFS);
-	BUG_ON(!em || IS_ERR(em));
+	BUG_ON(!em);
 
 	em->start = start;
 	em->len = extent_key->offset;
@@ -7477,7 +7529,7 @@
 		BUG_ON(reloc_root->commit_root != NULL);
 		while (1) {
 			trans = btrfs_join_transaction(root, 1);
-			BUG_ON(!trans);
+			BUG_ON(IS_ERR(trans));
 
 			mutex_lock(&root->fs_info->drop_mutex);
 			ret = btrfs_drop_snapshot(trans, reloc_root);
@@ -7535,7 +7587,7 @@
 
 	if (found) {
 		trans = btrfs_start_transaction(root, 1);
-		BUG_ON(!trans);
+		BUG_ON(IS_ERR(trans));
 		ret = btrfs_commit_transaction(trans, root);
 		BUG_ON(ret);
 	}
@@ -7779,7 +7831,7 @@
 
 
 	trans = btrfs_start_transaction(extent_root, 1);
-	BUG_ON(!trans);
+	BUG_ON(IS_ERR(trans));
 
 	if (extent_key->objectid == 0) {
 		ret = del_extent_zero(trans, extent_root, path, extent_key);
@@ -8270,6 +8322,13 @@
 		if (block_group->cached == BTRFS_CACHE_STARTED)
 			wait_block_group_cache_done(block_group);
 
+		/*
+		 * We haven't cached this block group, which means we could
+		 * possibly have excluded extents on this block group.
+		 */
+		if (block_group->cached == BTRFS_CACHE_NO)
+			free_excluded_extents(info->extent_root, block_group);
+
 		btrfs_remove_free_space_cache(block_group);
 		btrfs_put_block_group(block_group);
 
@@ -8385,6 +8444,13 @@
 		cache->sectorsize = root->sectorsize;
 
 		/*
+		 * We need to exclude the super stripes now so that the space
+		 * info has super bytes accounted for, otherwise we'll think
+		 * we have more space than we actually do.
+		 */
+		exclude_super_stripes(root, cache);
+
+		/*
 		 * check for two cases, either we are full, and therefore
 		 * don't need to bother with the caching work since we won't
 		 * find any space, or we are empty, and we can just add all
@@ -8392,12 +8458,10 @@
 		 * time, particularly in the full case.
 		 */
 		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
-			exclude_super_stripes(root, cache);
 			cache->last_byte_to_unpin = (u64)-1;
 			cache->cached = BTRFS_CACHE_FINISHED;
 			free_excluded_extents(root, cache);
 		} else if (btrfs_block_group_used(&cache->item) == 0) {
-			exclude_super_stripes(root, cache);
 			cache->last_byte_to_unpin = (u64)-1;
 			cache->cached = BTRFS_CACHE_FINISHED;
 			add_new_free_space(cache, root->fs_info,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2e993cf..92ac519 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1865,7 +1865,7 @@
 	bio_get(bio);
 
 	if (tree->ops && tree->ops->submit_bio_hook)
-		tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
+		ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
 					   mirror_num, bio_flags, start);
 	else
 		submit_bio(rw, bio);
@@ -1920,6 +1920,8 @@
 		nr = bio_get_nr_vecs(bdev);
 
 	bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
+	if (!bio)
+		return -ENOMEM;
 
 	bio_add_page(bio, page, page_size, offset);
 	bio->bi_end_io = end_io_func;
@@ -1944,6 +1946,7 @@
 
 static void set_page_extent_head(struct page *page, unsigned long len)
 {
+	WARN_ON(!PagePrivate(page));
 	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
 }
 
@@ -2126,7 +2129,7 @@
 	ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
 				      &bio_flags);
 	if (bio)
-		submit_one_bio(READ, bio, 0, bio_flags);
+		ret = submit_one_bio(READ, bio, 0, bio_flags);
 	return ret;
 }
 
@@ -2819,9 +2822,17 @@
 		 * at this point we can safely clear everything except the
 		 * locked bit and the nodatasum bit
 		 */
-		clear_extent_bit(tree, start, end,
+		ret = clear_extent_bit(tree, start, end,
 				 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
 				 0, 0, NULL, mask);
+
+		/* if clear_extent_bit failed for ENOMEM reasons,
+		 * we can't allow the release to continue.
+		 */
+		if (ret < 0)
+			ret = 0;
+		else
+			ret = 1;
 	}
 	return ret;
 }
@@ -3192,7 +3203,13 @@
 		}
 		if (!PageUptodate(p))
 			uptodate = 0;
-		unlock_page(p);
+
+		/*
+		 * see below about how we avoid a nasty race with release page
+		 * and why we unlock later
+		 */
+		if (i != 0)
+			unlock_page(p);
 	}
 	if (uptodate)
 		set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
@@ -3216,9 +3233,26 @@
 	atomic_inc(&eb->refs);
 	spin_unlock(&tree->buffer_lock);
 	radix_tree_preload_end();
+
+	/*
+	 * there is a race where release page may have
+	 * tried to find this extent buffer in the radix
+	 * but failed.  It will tell the VM it is safe to
+	 * reclaim the page, and it will clear the page private bit.
+	 * We must make sure to set the page private bit properly
+	 * after the extent buffer is in the radix tree so
+	 * it doesn't get lost
+	 */
+	set_page_extent_mapped(eb->first_page);
+	set_page_extent_head(eb->first_page, eb->len);
+	if (!page0)
+		unlock_page(eb->first_page);
 	return eb;
 
 free_eb:
+	if (eb->first_page && !page0)
+		unlock_page(eb->first_page);
+
 	if (!atomic_dec_and_test(&eb->refs))
 		return exists;
 	btrfs_release_extent_buffer(eb);
@@ -3269,10 +3303,11 @@
 			continue;
 
 		lock_page(page);
+		WARN_ON(!PagePrivate(page));
+
+		set_page_extent_mapped(page);
 		if (i == 0)
 			set_page_extent_head(page, eb->len);
-		else
-			set_page_private(page, EXTENT_PAGE_PRIVATE);
 
 		clear_page_dirty_for_io(page);
 		spin_lock_irq(&page->mapping->tree_lock);
@@ -3462,6 +3497,13 @@
 
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
+
+		WARN_ON(!PagePrivate(page));
+
+		set_page_extent_mapped(page);
+		if (i == 0)
+			set_page_extent_head(page, eb->len);
+
 		if (inc_all_pages)
 			page_cache_get(page);
 		if (!PageUptodate(page)) {
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index b0e1fce..2b6c12e 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -51,8 +51,8 @@
 {
 	struct extent_map *em;
 	em = kmem_cache_alloc(extent_map_cache, mask);
-	if (!em || IS_ERR(em))
-		return em;
+	if (!em)
+		return NULL;
 	em->in_tree = 0;
 	em->flags = 0;
 	em->compress_type = BTRFS_COMPRESS_NONE;
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index a562a25..4f19a3e 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -536,6 +536,8 @@
 	root = root->fs_info->csum_root;
 
 	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
 
 	while (1) {
 		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
@@ -548,7 +550,10 @@
 			if (path->slots[0] == 0)
 				goto out;
 			path->slots[0]--;
+		} else if (ret < 0) {
+			goto out;
 		}
+
 		leaf = path->nodes[0];
 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c800d58..7084140 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -186,6 +186,7 @@
 			split = alloc_extent_map(GFP_NOFS);
 		if (!split2)
 			split2 = alloc_extent_map(GFP_NOFS);
+		BUG_ON(!split || !split2);
 
 		write_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, start, len);
@@ -793,8 +794,12 @@
 	for (i = 0; i < num_pages; i++) {
 		pages[i] = grab_cache_page(inode->i_mapping, index + i);
 		if (!pages[i]) {
-			err = -ENOMEM;
-			BUG_ON(1);
+			int c;
+			for (c = i - 1; c >= 0; c--) {
+				unlock_page(pages[c]);
+				page_cache_release(pages[c]);
+			}
+			return -ENOMEM;
 		}
 		wait_on_page_writeback(pages[i]);
 	}
@@ -946,6 +951,10 @@
 		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
 		     (sizeof(struct page *)));
 	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
+	if (!pages) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	/* generic_write_checks can change our pos */
 	start_pos = pos;
@@ -984,8 +993,8 @@
 		size_t write_bytes = min(iov_iter_count(&i),
 					 nrptrs * (size_t)PAGE_CACHE_SIZE -
 					 offset);
-		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
-					PAGE_CACHE_SHIFT;
+		size_t num_pages = (write_bytes + offset +
+				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 
 		WARN_ON(num_pages > nrptrs);
 		memset(pages, 0, sizeof(struct page *) * nrptrs);
@@ -1015,8 +1024,8 @@
 
 		copied = btrfs_copy_from_user(pos, num_pages,
 					   write_bytes, pages, &i);
-		dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >>
-					PAGE_CACHE_SHIFT;
+		dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >>
+				PAGE_CACHE_SHIFT;
 
 		if (num_pages > dirty_pages) {
 			if (copied > 0)
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 60d6842..a039065 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -987,11 +987,18 @@
 	return entry;
 }
 
-static void unlink_free_space(struct btrfs_block_group_cache *block_group,
-			      struct btrfs_free_space *info)
+static inline void
+__unlink_free_space(struct btrfs_block_group_cache *block_group,
+		    struct btrfs_free_space *info)
 {
 	rb_erase(&info->offset_index, &block_group->free_space_offset);
 	block_group->free_extents--;
+}
+
+static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+			      struct btrfs_free_space *info)
+{
+	__unlink_free_space(block_group, info);
 	block_group->free_space -= info->bytes;
 }
 
@@ -1016,14 +1023,18 @@
 	u64 max_bytes;
 	u64 bitmap_bytes;
 	u64 extent_bytes;
+	u64 size = block_group->key.offset;
 
 	/*
 	 * The goal is to keep the total amount of memory used per 1gb of space
 	 * at or below 32k, so we need to adjust how much memory we allow to be
 	 * used by extent based free space tracking
 	 */
-	max_bytes = MAX_CACHE_BYTES_PER_GIG *
-		(div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
+	if (size < 1024 * 1024 * 1024)
+		max_bytes = MAX_CACHE_BYTES_PER_GIG;
+	else
+		max_bytes = MAX_CACHE_BYTES_PER_GIG *
+			div64_u64(size, 1024 * 1024 * 1024);
 
 	/*
 	 * we want to account for 1 more bitmap than what we have so we can make
@@ -1171,6 +1182,16 @@
 	recalculate_thresholds(block_group);
 }
 
+static void free_bitmap(struct btrfs_block_group_cache *block_group,
+			struct btrfs_free_space *bitmap_info)
+{
+	unlink_free_space(block_group, bitmap_info);
+	kfree(bitmap_info->bitmap);
+	kfree(bitmap_info);
+	block_group->total_bitmaps--;
+	recalculate_thresholds(block_group);
+}
+
 static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
 			      struct btrfs_free_space *bitmap_info,
 			      u64 *offset, u64 *bytes)
@@ -1195,6 +1216,7 @@
 	 */
 	search_start = *offset;
 	search_bytes = *bytes;
+	search_bytes = min(search_bytes, end - search_start + 1);
 	ret = search_bitmap(block_group, bitmap_info, &search_start,
 			    &search_bytes);
 	BUG_ON(ret < 0 || search_start != *offset);
@@ -1211,13 +1233,8 @@
 
 	if (*bytes) {
 		struct rb_node *next = rb_next(&bitmap_info->offset_index);
-		if (!bitmap_info->bytes) {
-			unlink_free_space(block_group, bitmap_info);
-			kfree(bitmap_info->bitmap);
-			kfree(bitmap_info);
-			block_group->total_bitmaps--;
-			recalculate_thresholds(block_group);
-		}
+		if (!bitmap_info->bytes)
+			free_bitmap(block_group, bitmap_info);
 
 		/*
 		 * no entry after this bitmap, but we still have bytes to
@@ -1250,13 +1267,8 @@
 			return -EAGAIN;
 
 		goto again;
-	} else if (!bitmap_info->bytes) {
-		unlink_free_space(block_group, bitmap_info);
-		kfree(bitmap_info->bitmap);
-		kfree(bitmap_info);
-		block_group->total_bitmaps--;
-		recalculate_thresholds(block_group);
-	}
+	} else if (!bitmap_info->bytes)
+		free_bitmap(block_group, bitmap_info);
 
 	return 0;
 }
@@ -1359,22 +1371,14 @@
 	return ret;
 }
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-			 u64 offset, u64 bytes)
+bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
+			  struct btrfs_free_space *info, bool update_stat)
 {
-	struct btrfs_free_space *right_info = NULL;
-	struct btrfs_free_space *left_info = NULL;
-	struct btrfs_free_space *info = NULL;
-	int ret = 0;
-
-	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
-	if (!info)
-		return -ENOMEM;
-
-	info->offset = offset;
-	info->bytes = bytes;
-
-	spin_lock(&block_group->tree_lock);
+	struct btrfs_free_space *left_info;
+	struct btrfs_free_space *right_info;
+	bool merged = false;
+	u64 offset = info->offset;
+	u64 bytes = info->bytes;
 
 	/*
 	 * first we want to see if there is free space adjacent to the range we
@@ -1388,37 +1392,62 @@
 	else
 		left_info = tree_search_offset(block_group, offset - 1, 0, 0);
 
-	/*
-	 * If there was no extent directly to the left or right of this new
-	 * extent then we know we're going to have to allocate a new extent, so
-	 * before we do that see if we need to drop this into a bitmap
-	 */
-	if ((!left_info || left_info->bitmap) &&
-	    (!right_info || right_info->bitmap)) {
-		ret = insert_into_bitmap(block_group, info);
-
-		if (ret < 0) {
-			goto out;
-		} else if (ret) {
-			ret = 0;
-			goto out;
-		}
-	}
-
 	if (right_info && !right_info->bitmap) {
-		unlink_free_space(block_group, right_info);
+		if (update_stat)
+			unlink_free_space(block_group, right_info);
+		else
+			__unlink_free_space(block_group, right_info);
 		info->bytes += right_info->bytes;
 		kfree(right_info);
+		merged = true;
 	}
 
 	if (left_info && !left_info->bitmap &&
 	    left_info->offset + left_info->bytes == offset) {
-		unlink_free_space(block_group, left_info);
+		if (update_stat)
+			unlink_free_space(block_group, left_info);
+		else
+			__unlink_free_space(block_group, left_info);
 		info->offset = left_info->offset;
 		info->bytes += left_info->bytes;
 		kfree(left_info);
+		merged = true;
 	}
 
+	return merged;
+}
+
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+			 u64 offset, u64 bytes)
+{
+	struct btrfs_free_space *info;
+	int ret = 0;
+
+	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+	if (!info)
+		return -ENOMEM;
+
+	info->offset = offset;
+	info->bytes = bytes;
+
+	spin_lock(&block_group->tree_lock);
+
+	if (try_merge_free_space(block_group, info, true))
+		goto link;
+
+	/*
+	 * If there was no extent directly to the left or right of this new
+	 * extent, then we know we're going to have to allocate a new extent, so
+	 * before we do that see if we need to drop this into a bitmap
+	 */
+	ret = insert_into_bitmap(block_group, info);
+	if (ret < 0) {
+		goto out;
+	} else if (ret) {
+		ret = 0;
+		goto out;
+	}
+link:
 	ret = link_free_space(block_group, info);
 	if (ret)
 		kfree(info);
@@ -1621,6 +1650,7 @@
 		node = rb_next(&entry->offset_index);
 		rb_erase(&entry->offset_index, &cluster->root);
 		BUG_ON(entry->bitmap);
+		try_merge_free_space(block_group, entry, false);
 		tree_insert_offset(&block_group->free_space_offset,
 				   entry->offset, &entry->offset_index, 0);
 	}
@@ -1685,13 +1715,8 @@
 	ret = offset;
 	if (entry->bitmap) {
 		bitmap_clear_bits(block_group, entry, offset, bytes);
-		if (!entry->bytes) {
-			unlink_free_space(block_group, entry);
-			kfree(entry->bitmap);
-			kfree(entry);
-			block_group->total_bitmaps--;
-			recalculate_thresholds(block_group);
-		}
+		if (!entry->bytes)
+			free_bitmap(block_group, entry);
 	} else {
 		unlink_free_space(block_group, entry);
 		entry->offset += bytes;
@@ -1789,6 +1814,8 @@
 
 	ret = search_start;
 	bitmap_clear_bits(block_group, entry, ret, bytes);
+	if (entry->bytes == 0)
+		free_bitmap(block_group, entry);
 out:
 	spin_unlock(&cluster->lock);
 	spin_unlock(&block_group->tree_lock);
@@ -1842,15 +1869,26 @@
 		entry->offset += bytes;
 		entry->bytes -= bytes;
 
-		if (entry->bytes == 0) {
+		if (entry->bytes == 0)
 			rb_erase(&entry->offset_index, &cluster->root);
-			kfree(entry);
-		}
 		break;
 	}
 out:
 	spin_unlock(&cluster->lock);
 
+	if (!ret)
+		return 0;
+
+	spin_lock(&block_group->tree_lock);
+
+	block_group->free_space -= bytes;
+	if (entry->bytes == 0) {
+		block_group->free_extents--;
+		kfree(entry);
+	}
+
+	spin_unlock(&block_group->tree_lock);
+
 	return ret;
 }
 
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 160b55b..fb9bd78 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -416,7 +416,7 @@
 	}
 	if (start == 0) {
 		trans = btrfs_join_transaction(root, 1);
-		BUG_ON(!trans);
+		BUG_ON(IS_ERR(trans));
 		btrfs_set_trans_block_group(trans, inode);
 		trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
@@ -612,6 +612,7 @@
 			    GFP_NOFS);
 
 		trans = btrfs_join_transaction(root, 1);
+		BUG_ON(IS_ERR(trans));
 		ret = btrfs_reserve_extent(trans, root,
 					   async_extent->compressed_size,
 					   async_extent->compressed_size,
@@ -643,6 +644,7 @@
 					async_extent->ram_size - 1, 0);
 
 		em = alloc_extent_map(GFP_NOFS);
+		BUG_ON(!em);
 		em->start = async_extent->start;
 		em->len = async_extent->ram_size;
 		em->orig_start = em->start;
@@ -771,7 +773,7 @@
 
 	BUG_ON(root == root->fs_info->tree_root);
 	trans = btrfs_join_transaction(root, 1);
-	BUG_ON(!trans);
+	BUG_ON(IS_ERR(trans));
 	btrfs_set_trans_block_group(trans, inode);
 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
@@ -819,6 +821,7 @@
 		BUG_ON(ret);
 
 		em = alloc_extent_map(GFP_NOFS);
+		BUG_ON(!em);
 		em->start = start;
 		em->orig_start = em->start;
 		ram_size = ins.offset;
@@ -1049,7 +1052,7 @@
 	} else {
 		trans = btrfs_join_transaction(root, 1);
 	}
-	BUG_ON(!trans);
+	BUG_ON(IS_ERR(trans));
 
 	cow_start = (u64)-1;
 	cur_offset = start;
@@ -1168,6 +1171,7 @@
 			struct extent_map_tree *em_tree;
 			em_tree = &BTRFS_I(inode)->extent_tree;
 			em = alloc_extent_map(GFP_NOFS);
+			BUG_ON(!em);
 			em->start = cur_offset;
 			em->orig_start = em->start;
 			em->len = num_bytes;
@@ -1557,6 +1561,7 @@
 out_page:
 	unlock_page(page);
 	page_cache_release(page);
+	kfree(fixup);
 }
 
 /*
@@ -1703,7 +1708,7 @@
 				trans = btrfs_join_transaction_nolock(root, 1);
 			else
 				trans = btrfs_join_transaction(root, 1);
-			BUG_ON(!trans);
+			BUG_ON(IS_ERR(trans));
 			btrfs_set_trans_block_group(trans, inode);
 			trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 			ret = btrfs_update_inode(trans, root, inode);
@@ -1720,6 +1725,7 @@
 		trans = btrfs_join_transaction_nolock(root, 1);
 	else
 		trans = btrfs_join_transaction(root, 1);
+	BUG_ON(IS_ERR(trans));
 	btrfs_set_trans_block_group(trans, inode);
 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
@@ -2354,6 +2360,7 @@
 		 */
 		if (is_bad_inode(inode)) {
 			trans = btrfs_start_transaction(root, 0);
+			BUG_ON(IS_ERR(trans));
 			btrfs_orphan_del(trans, inode);
 			btrfs_end_transaction(trans, root);
 			iput(inode);
@@ -2381,6 +2388,7 @@
 
 	if (root->orphan_block_rsv || root->orphan_item_inserted) {
 		trans = btrfs_join_transaction(root, 1);
+		BUG_ON(IS_ERR(trans));
 		btrfs_end_transaction(trans, root);
 	}
 
@@ -2641,7 +2649,7 @@
 	path = btrfs_alloc_path();
 	if (!path) {
 		ret = -ENOMEM;
-		goto err;
+		goto out;
 	}
 
 	path->leave_spinning = 1;
@@ -2714,9 +2722,10 @@
 	struct extent_buffer *eb;
 	int level;
 	u64 refs = 1;
-	int uninitialized_var(ret);
 
 	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
+		int ret;
+
 		if (!path->nodes[level])
 			break;
 		eb = path->nodes[level];
@@ -2727,7 +2736,7 @@
 		if (refs > 1)
 			return 1;
 	}
-	return ret; /* XXX callers? */
+	return 0;
 }
 
 /*
@@ -4134,7 +4143,7 @@
 	}
 	srcu_read_unlock(&root->fs_info->subvol_srcu, index);
 
-	if (root != sub_root) {
+	if (!IS_ERR(inode) && root != sub_root) {
 		down_read(&root->fs_info->cleanup_work_sem);
 		if (!(inode->i_sb->s_flags & MS_RDONLY))
 			btrfs_orphan_cleanup(sub_root);
@@ -4347,6 +4356,8 @@
 			trans = btrfs_join_transaction_nolock(root, 1);
 		else
 			trans = btrfs_join_transaction(root, 1);
+		if (IS_ERR(trans))
+			return PTR_ERR(trans);
 		btrfs_set_trans_block_group(trans, inode);
 		if (nolock)
 			ret = btrfs_end_transaction_nolock(trans, root);
@@ -4372,6 +4383,7 @@
 		return;
 
 	trans = btrfs_join_transaction(root, 1);
+	BUG_ON(IS_ERR(trans));
 	btrfs_set_trans_block_group(trans, inode);
 
 	ret = btrfs_update_inode(trans, root, inode);
@@ -5176,6 +5188,8 @@
 				em = NULL;
 				btrfs_release_path(root, path);
 				trans = btrfs_join_transaction(root, 1);
+				if (IS_ERR(trans))
+					return ERR_CAST(trans);
 				goto again;
 			}
 			map = kmap(page);
@@ -5280,8 +5294,8 @@
 	btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
 
 	trans = btrfs_join_transaction(root, 0);
-	if (!trans)
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(trans))
+		return ERR_CAST(trans);
 
 	trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
@@ -5505,7 +5519,7 @@
 		 * while we look for nocow cross refs
 		 */
 		trans = btrfs_join_transaction(root, 0);
-		if (!trans)
+		if (IS_ERR(trans))
 			goto must_cow;
 
 		if (can_nocow_odirect(trans, inode, start, len) == 1) {
@@ -5640,7 +5654,7 @@
 	BUG_ON(!ordered);
 
 	trans = btrfs_join_transaction(root, 1);
-	if (!trans) {
+	if (IS_ERR(trans)) {
 		err = -ENOMEM;
 		goto out;
 	}
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a506a22..be2d4f6 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -203,7 +203,7 @@
 
 
 	trans = btrfs_join_transaction(root, 1);
-	BUG_ON(!trans);
+	BUG_ON(IS_ERR(trans));
 
 	ret = btrfs_update_inode(trans, root, inode);
 	BUG_ON(ret);
@@ -907,6 +907,10 @@
 
 	if (new_size > old_size) {
 		trans = btrfs_start_transaction(root, 0);
+		if (IS_ERR(trans)) {
+			ret = PTR_ERR(trans);
+			goto out_unlock;
+		}
 		ret = btrfs_grow_device(trans, device, new_size);
 		btrfs_commit_transaction(trans, root);
 	} else {
@@ -1898,7 +1902,10 @@
 
 			memcpy(&new_key, &key, sizeof(new_key));
 			new_key.objectid = inode->i_ino;
-			new_key.offset = key.offset + destoff - off;
+			if (off <= key.offset)
+				new_key.offset = key.offset + destoff - off;
+			else
+				new_key.offset = destoff;
 
 			trans = btrfs_start_transaction(root, 1);
 			if (IS_ERR(trans)) {
@@ -2082,7 +2089,7 @@
 
 	ret = -ENOMEM;
 	trans = btrfs_start_ioctl_transaction(root, 0);
-	if (!trans)
+	if (IS_ERR(trans))
 		goto out_drop;
 
 	file->private_data = trans;
@@ -2138,9 +2145,9 @@
 	path->leave_spinning = 1;
 
 	trans = btrfs_start_transaction(root, 1);
-	if (!trans) {
+	if (IS_ERR(trans)) {
 		btrfs_free_path(path);
-		return -ENOMEM;
+		return PTR_ERR(trans);
 	}
 
 	dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
@@ -2201,7 +2208,7 @@
 	int num_types = 4;
 	int alloc_size;
 	int ret = 0;
-	int slot_count = 0;
+	u64 slot_count = 0;
 	int i, c;
 
 	if (copy_from_user(&space_args,
@@ -2240,7 +2247,7 @@
 		goto out;
 	}
 
-	slot_count = min_t(int, space_args.space_slots, slot_count);
+	slot_count = min_t(u64, space_args.space_slots, slot_count);
 
 	alloc_size = sizeof(*dest) * slot_count;
 
@@ -2260,6 +2267,9 @@
 	for (i = 0; i < num_types; i++) {
 		struct btrfs_space_info *tmp;
 
+		if (!slot_count)
+			break;
+
 		info = NULL;
 		rcu_read_lock();
 		list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
@@ -2281,7 +2291,10 @@
 				memcpy(dest, &space, sizeof(space));
 				dest++;
 				space_args.total_spaces++;
+				slot_count--;
 			}
+			if (!slot_count)
+				break;
 		}
 		up_read(&info->groups_sem);
 	}
@@ -2334,6 +2347,8 @@
 	u64 transid;
 
 	trans = btrfs_start_transaction(root, 0);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
 	transid = trans->transid;
 	btrfs_commit_transaction_async(trans, root, 0);
 
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 2b61e1d..083a554 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -141,7 +141,7 @@
 					  u64 file_offset)
 {
 	struct rb_root *root = &tree->tree;
-	struct rb_node *prev;
+	struct rb_node *prev = NULL;
 	struct rb_node *ret;
 	struct btrfs_ordered_extent *entry;
 
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 0d126be..fb2605d 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -260,6 +260,7 @@
 #else
 			BUG();
 #endif
+			break;
 		case BTRFS_BLOCK_GROUP_ITEM_KEY:
 			bi = btrfs_item_ptr(l, i,
 					    struct btrfs_block_group_item);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 045c9c2..0825e4e 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1157,6 +1157,7 @@
 	new_node->bytenr = dest->node->start;
 	new_node->level = node->level;
 	new_node->lowest = node->lowest;
+	new_node->checked = 1;
 	new_node->root = dest;
 
 	if (!node->lowest) {
@@ -2028,6 +2029,7 @@
 
 	while (1) {
 		trans = btrfs_start_transaction(root, 0);
+		BUG_ON(IS_ERR(trans));
 		trans->block_rsv = rc->block_rsv;
 
 		ret = btrfs_block_rsv_check(trans, root, rc->block_rsv,
@@ -2147,6 +2149,12 @@
 	}
 
 	trans = btrfs_join_transaction(rc->extent_root, 1);
+	if (IS_ERR(trans)) {
+		if (!err)
+			btrfs_block_rsv_release(rc->extent_root,
+						rc->block_rsv, num_bytes);
+		return PTR_ERR(trans);
+	}
 
 	if (!err) {
 		if (num_bytes != rc->merging_rsv_size) {
@@ -3222,6 +3230,7 @@
 	trans = btrfs_join_transaction(root, 0);
 	if (IS_ERR(trans)) {
 		btrfs_free_path(path);
+		ret = PTR_ERR(trans);
 		goto out;
 	}
 
@@ -3628,6 +3637,7 @@
 	set_reloc_control(rc);
 
 	trans = btrfs_join_transaction(rc->extent_root, 1);
+	BUG_ON(IS_ERR(trans));
 	btrfs_commit_transaction(trans, rc->extent_root);
 	return 0;
 }
@@ -3657,6 +3667,7 @@
 
 	while (1) {
 		trans = btrfs_start_transaction(rc->extent_root, 0);
+		BUG_ON(IS_ERR(trans));
 
 		if (update_backref_cache(trans, &rc->backref_cache)) {
 			btrfs_end_transaction(trans, rc->extent_root);
@@ -3804,7 +3815,10 @@
 
 	/* get rid of pinned extents */
 	trans = btrfs_join_transaction(rc->extent_root, 1);
-	btrfs_commit_transaction(trans, rc->extent_root);
+	if (IS_ERR(trans))
+		err = PTR_ERR(trans);
+	else
+		btrfs_commit_transaction(trans, rc->extent_root);
 out_free:
 	btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
 	btrfs_free_path(path);
@@ -4022,6 +4036,7 @@
 	int ret;
 
 	trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
+	BUG_ON(IS_ERR(trans));
 
 	memset(&root->root_item.drop_progress, 0,
 		sizeof(root->root_item.drop_progress));
@@ -4125,6 +4140,11 @@
 	set_reloc_control(rc);
 
 	trans = btrfs_join_transaction(rc->extent_root, 1);
+	if (IS_ERR(trans)) {
+		unset_reloc_control(rc);
+		err = PTR_ERR(trans);
+		goto out_free;
+	}
 
 	rc->merge_reloc_tree = 1;
 
@@ -4154,9 +4174,13 @@
 	unset_reloc_control(rc);
 
 	trans = btrfs_join_transaction(rc->extent_root, 1);
-	btrfs_commit_transaction(trans, rc->extent_root);
-out:
+	if (IS_ERR(trans))
+		err = PTR_ERR(trans);
+	else
+		btrfs_commit_transaction(trans, rc->extent_root);
+out_free:
 	kfree(rc);
+out:
 	while (!list_empty(&reloc_roots)) {
 		reloc_root = list_entry(reloc_roots.next,
 					struct btrfs_root, root_list);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index b2130c4..a004008 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -383,7 +383,7 @@
 		struct btrfs_fs_devices **fs_devices)
 {
 	substring_t args[MAX_OPT_ARGS];
-	char *opts, *p;
+	char *opts, *orig, *p;
 	int error = 0;
 	int intarg;
 
@@ -397,6 +397,7 @@
 	opts = kstrdup(options, GFP_KERNEL);
 	if (!opts)
 		return -ENOMEM;
+	orig = opts;
 
 	while ((p = strsep(&opts, ",")) != NULL) {
 		int token;
@@ -432,7 +433,7 @@
 	}
 
  out_free_opts:
-	kfree(opts);
+	kfree(orig);
  out:
 	/*
 	 * If no subvolume name is specified we use the default one.  Allocate
@@ -623,6 +624,8 @@
 	btrfs_wait_ordered_extents(root, 0, 0);
 
 	trans = btrfs_start_transaction(root, 0);
+	if (IS_ERR(trans))
+		return PTR_ERR(trans);
 	ret = btrfs_commit_transaction(trans, root);
 	return ret;
 }
@@ -761,6 +764,8 @@
 		}
 
 		btrfs_close_devices(fs_devices);
+		kfree(fs_info);
+		kfree(tree_root);
 	} else {
 		char b[BDEVNAME_SIZE];
 
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index bae5c7b..3d73c8d 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1161,6 +1161,11 @@
 	INIT_DELAYED_WORK(&ac->work, do_async_commit);
 	ac->root = root;
 	ac->newtrans = btrfs_join_transaction(root, 0);
+	if (IS_ERR(ac->newtrans)) {
+		int err = PTR_ERR(ac->newtrans);
+		kfree(ac);
+		return err;
+	}
 
 	/* take transaction reference */
 	mutex_lock(&root->fs_info->trans_mutex);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 054744a..a4bbb85 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -338,6 +338,12 @@
 		}
 		dst_copy = kmalloc(item_size, GFP_NOFS);
 		src_copy = kmalloc(item_size, GFP_NOFS);
+		if (!dst_copy || !src_copy) {
+			btrfs_release_path(root, path);
+			kfree(dst_copy);
+			kfree(src_copy);
+			return -ENOMEM;
+		}
 
 		read_extent_buffer(eb, src_copy, src_ptr, item_size);
 
@@ -665,6 +671,9 @@
 	btrfs_dir_item_key_to_cpu(leaf, di, &location);
 	name_len = btrfs_dir_name_len(leaf, di);
 	name = kmalloc(name_len, GFP_NOFS);
+	if (!name)
+		return -ENOMEM;
+
 	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
 	btrfs_release_path(root, path);
 
@@ -744,6 +753,9 @@
 	int match = 0;
 
 	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
 	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
 	if (ret != 0)
 		goto out;
@@ -967,6 +979,8 @@
 	key.offset = (u64)-1;
 
 	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
 
 	while (1) {
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -1178,6 +1192,9 @@
 
 	name_len = btrfs_dir_name_len(eb, di);
 	name = kmalloc(name_len, GFP_NOFS);
+	if (!name)
+		return -ENOMEM;
+
 	log_type = btrfs_dir_type(eb, di);
 	read_extent_buffer(eb, name, (unsigned long)(di + 1),
 		   name_len);
@@ -1692,6 +1709,8 @@
 		root_owner = btrfs_header_owner(parent);
 
 		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
+		if (!next)
+			return -ENOMEM;
 
 		if (*level == 1) {
 			wc->process_func(root, next, wc, ptr_gen);
@@ -2032,6 +2051,7 @@
 		wait_log_commit(trans, log_root_tree,
 				log_root_tree->log_transid);
 		mutex_unlock(&log_root_tree->log_mutex);
+		ret = 0;
 		goto out;
 	}
 	atomic_set(&log_root_tree->log_commit[index2], 1);
@@ -2096,7 +2116,7 @@
 	smp_mb();
 	if (waitqueue_active(&root->log_commit_wait[index1]))
 		wake_up(&root->log_commit_wait[index1]);
-	return 0;
+	return ret;
 }
 
 static void free_log_tree(struct btrfs_trans_handle *trans,
@@ -2194,6 +2214,9 @@
 
 	log = root->log_root;
 	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
 	di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
 				   name, name_len, -1);
 	if (IS_ERR(di)) {
@@ -2594,6 +2617,9 @@
 
 	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
 			   nr * sizeof(u32), GFP_NOFS);
+	if (!ins_data)
+		return -ENOMEM;
+
 	ins_sizes = (u32 *)ins_data;
 	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
 
@@ -2725,7 +2751,13 @@
 	log = root->log_root;
 
 	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
 	dst_path = btrfs_alloc_path();
+	if (!dst_path) {
+		btrfs_free_path(path);
+		return -ENOMEM;
+	}
 
 	min_key.objectid = inode->i_ino;
 	min_key.type = BTRFS_INODE_ITEM_KEY;
@@ -3080,6 +3112,7 @@
 	BUG_ON(!path);
 
 	trans = btrfs_start_transaction(fs_info->tree_root, 0);
+	BUG_ON(IS_ERR(trans));
 
 	wc.trans = trans;
 	wc.pin = 1;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index d158530..af7dbca 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1213,6 +1213,10 @@
 		return -ENOMEM;
 
 	trans = btrfs_start_transaction(root, 0);
+	if (IS_ERR(trans)) {
+		btrfs_free_path(path);
+		return PTR_ERR(trans);
+	}
 	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
 	key.type = BTRFS_DEV_ITEM_KEY;
 	key.offset = device->devid;
@@ -1601,11 +1605,19 @@
 
 	ret = find_next_devid(root, &device->devid);
 	if (ret) {
+		kfree(device->name);
 		kfree(device);
 		goto error;
 	}
 
 	trans = btrfs_start_transaction(root, 0);
+	if (IS_ERR(trans)) {
+		kfree(device->name);
+		kfree(device);
+		ret = PTR_ERR(trans);
+		goto error;
+	}
+
 	lock_chunks(root);
 
 	device->writeable = 1;
@@ -1873,7 +1885,7 @@
 		return ret;
 
 	trans = btrfs_start_transaction(root, 0);
-	BUG_ON(!trans);
+	BUG_ON(IS_ERR(trans));
 
 	lock_chunks(root);
 
@@ -2047,7 +2059,7 @@
 		BUG_ON(ret);
 
 		trans = btrfs_start_transaction(dev_root, 0);
-		BUG_ON(!trans);
+		BUG_ON(IS_ERR(trans));
 
 		ret = btrfs_grow_device(trans, device, old_size);
 		BUG_ON(ret);
@@ -2213,6 +2225,11 @@
 
 	/* Shrinking succeeded, else we would be at "done". */
 	trans = btrfs_start_transaction(root, 0);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto done;
+	}
+
 	lock_chunks(root);
 
 	device->disk_total_bytes = new_size;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 60d27bc..6b61ded 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1560,9 +1560,10 @@
 		/* NOTE: no side-effects allowed, until we take s_mutex */
 
 		revoking = cap->implemented & ~cap->issued;
-		if (revoking)
-			dout(" mds%d revoking %s\n", cap->mds,
-			     ceph_cap_string(revoking));
+		dout(" mds%d cap %p issued %s implemented %s revoking %s\n",
+		     cap->mds, cap, ceph_cap_string(cap->issued),
+		     ceph_cap_string(cap->implemented),
+		     ceph_cap_string(revoking));
 
 		if (cap == ci->i_auth_cap &&
 		    (cap->issued & CEPH_CAP_FILE_WR)) {
@@ -1658,6 +1659,8 @@
 
 		if (cap == ci->i_auth_cap && ci->i_dirty_caps)
 			flushing = __mark_caps_flushing(inode, session);
+		else
+			flushing = 0;
 
 		mds = cap->mds;  /* remember mds, so we don't repeat */
 		sent++;
@@ -1940,6 +1943,35 @@
 	}
 }
 
+static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
+				     struct ceph_mds_session *session,
+				     struct inode *inode)
+{
+	struct ceph_inode_info *ci = ceph_inode(inode);
+	struct ceph_cap *cap;
+	int delayed = 0;
+
+	spin_lock(&inode->i_lock);
+	cap = ci->i_auth_cap;
+	dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
+	     ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
+	__ceph_flush_snaps(ci, &session, 1);
+	if (ci->i_flushing_caps) {
+		delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
+				     __ceph_caps_used(ci),
+				     __ceph_caps_wanted(ci),
+				     cap->issued | cap->implemented,
+				     ci->i_flushing_caps, NULL);
+		if (delayed) {
+			spin_lock(&inode->i_lock);
+			__cap_delay_requeue(mdsc, ci);
+			spin_unlock(&inode->i_lock);
+		}
+	} else {
+		spin_unlock(&inode->i_lock);
+	}
+}
+
 
 /*
  * Take references to capabilities we hold, so that we don't release
@@ -2687,7 +2719,7 @@
 	ceph_add_cap(inode, session, cap_id, -1,
 		     issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
 		     NULL /* no caps context */);
-	try_flush_caps(inode, session, NULL);
+	kick_flushing_inode_caps(mdsc, session, inode);
 	up_read(&mdsc->snap_rwsem);
 
 	/* make sure we re-request max_size, if necessary */
@@ -2785,8 +2817,7 @@
 	case CEPH_CAP_OP_IMPORT:
 		handle_cap_import(mdsc, inode, h, session,
 				  snaptrace, snaptrace_len);
-		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY,
-				session);
+		ceph_check_caps(ceph_inode(inode), 0, session);
 		goto done_unlocked;
 	}
 
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e835eff..5625463 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -710,10 +710,6 @@
 			ci->i_ceph_flags |= CEPH_I_COMPLETE;
 			ci->i_max_offset = 2;
 		}
-
-		/* it may be better to set st_size in getattr instead? */
-		if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
-			inode->i_size = ci->i_rbytes;
 		break;
 	default:
 		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
@@ -1819,7 +1815,11 @@
 		else
 			stat->dev = 0;
 		if (S_ISDIR(inode->i_mode)) {
-			stat->size = ci->i_rbytes;
+			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
+						RBYTES))
+				stat->size = ci->i_rbytes;
+			else
+				stat->size = ci->i_files + ci->i_subdirs;
 			stat->blocks = 0;
 			stat->blksize = 65536;
 		}
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 1e30d19..a1ee8fa 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -693,9 +693,11 @@
 				dout("choose_mds %p %llx.%llx "
 				     "frag %u mds%d (%d/%d)\n",
 				     inode, ceph_vinop(inode),
-				     frag.frag, frag.mds,
+				     frag.frag, mds,
 				     (int)r, frag.ndist);
-				return mds;
+				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
+				    CEPH_MDS_STATE_ACTIVE)
+					return mds;
 			}
 
 			/* since this file/dir wasn't known to be
@@ -708,7 +710,9 @@
 				dout("choose_mds %p %llx.%llx "
 				     "frag %u mds%d (auth)\n",
 				     inode, ceph_vinop(inode), frag.frag, mds);
-				return mds;
+				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
+				    CEPH_MDS_STATE_ACTIVE)
+					return mds;
 			}
 		}
 	}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index bf6f0f3..9c50854 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -290,6 +290,8 @@
 
         fsopt->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
         fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
+	fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
+	fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
         fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
         fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
         fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 6e12a6b..8c9eba6 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -219,6 +219,7 @@
 	struct rb_node **p;
 	struct rb_node *parent = NULL;
 	struct ceph_inode_xattr *xattr = NULL;
+	int name_len = strlen(name);
 	int c;
 
 	p = &ci->i_xattrs.index.rb_node;
@@ -226,6 +227,8 @@
 		parent = *p;
 		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
 		c = strncmp(name, xattr->name, xattr->name_len);
+		if (c == 0 && name_len > xattr->name_len)
+			c = 1;
 		if (c < 0)
 			p = &(*p)->rb_left;
 		else if (c > 0)
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index ee45648..7cb0f7f 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -3,6 +3,7 @@
 	depends on INET
 	select NLS
 	select CRYPTO
+	select CRYPTO_MD4
 	select CRYPTO_MD5
 	select CRYPTO_HMAC
 	select CRYPTO_ARC4
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 43b19dd..d875584 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -5,7 +5,7 @@
 
 cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \
 	  link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o \
-	  md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o \
+	  cifs_unicode.o nterr.o xattr.o cifsencrypt.o \
 	  readdir.o ioctl.o sess.o export.o
 
 cifs-$(CONFIG_CIFS_ACL) += cifsacl.o
diff --git a/fs/cifs/README b/fs/cifs/README
index 46af99a..fe16835 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -452,6 +452,11 @@
 		if oplock (caching token) is granted and held. Note that
 		direct allows write operations larger than page size
 		to be sent to the server.
+  strictcache   Use for switching on strict cache mode. In this mode the
+		client reads from the cache whenever it holds an Oplock Level II;
+		otherwise it reads from the server. All written data are stored
+		in the cache, but if the client does not hold an Exclusive Oplock,
+		it also writes the data to the server.
   acl   	Allow setfacl and getfacl to manage posix ACLs if server
 		supports them.  (default)
   noacl 	Do not allow setfacl and getfacl calls on this mount
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index ede9830..65829d3 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -79,11 +79,11 @@
 	spin_lock(&GlobalMid_Lock);
 	list_for_each(tmp, &server->pending_mid_q) {
 		mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-		cERROR(1, "State: %d Cmd: %d Pid: %d Tsk: %p Mid %d",
+		cERROR(1, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %d",
 			mid_entry->midState,
 			(int)mid_entry->command,
 			mid_entry->pid,
-			mid_entry->tsk,
+			mid_entry->callback_data,
 			mid_entry->mid);
 #ifdef CONFIG_CIFS_STATS2
 		cERROR(1, "IsLarge: %d buf: %p time rcv: %ld now: %ld",
@@ -218,11 +218,11 @@
 				mid_entry = list_entry(tmp3, struct mid_q_entry,
 					qhead);
 				seq_printf(m, "\tState: %d com: %d pid:"
-						" %d tsk: %p mid %d\n",
+						" %d cbdata: %p mid %d\n",
 						mid_entry->midState,
 						(int)mid_entry->command,
 						mid_entry->pid,
-						mid_entry->tsk,
+						mid_entry->callback_data,
 						mid_entry->mid);
 			}
 			spin_unlock(&GlobalMid_Lock);
@@ -331,7 +331,7 @@
 				atomic_read(&totSmBufAllocCount));
 #endif /* CONFIG_CIFS_STATS2 */
 
-	seq_printf(m, "Operations (MIDs): %d\n", midCount.counter);
+	seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
 	seq_printf(m,
 		"\n%d session %d share reconnects\n",
 		tcpSesReconnectCount.counter, tconInfoReconnectCount.counter);
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 7ed3653..0a265ad 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -282,8 +282,6 @@
 	cFYI(1, "in %s", __func__);
 	BUG_ON(IS_ROOT(mntpt));
 
-	xid = GetXid();
-
 	/*
 	 * The MSDFS spec states that paths in DFS referral requests and
 	 * responses must be prefixed by a single '\' character instead of
@@ -293,20 +291,21 @@
 	mnt = ERR_PTR(-ENOMEM);
 	full_path = build_path_from_dentry(mntpt);
 	if (full_path == NULL)
-		goto free_xid;
+		goto cdda_exit;
 
 	cifs_sb = CIFS_SB(mntpt->d_inode->i_sb);
 	tlink = cifs_sb_tlink(cifs_sb);
-	mnt = ERR_PTR(-EINVAL);
 	if (IS_ERR(tlink)) {
 		mnt = ERR_CAST(tlink);
 		goto free_full_path;
 	}
 	ses = tlink_tcon(tlink)->ses;
 
+	xid = GetXid();
 	rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls,
 		&num_referrals, &referrals,
 		cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+	FreeXid(xid);
 
 	cifs_put_tlink(tlink);
 
@@ -339,8 +338,7 @@
 	free_dfs_info_array(referrals, num_referrals);
 free_full_path:
 	kfree(full_path);
-free_xid:
-	FreeXid(xid);
+cdda_exit:
 	cFYI(1, "leaving %s" , __func__);
 	return mnt;
 }
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 7852cd6..ac51cd2 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -40,6 +40,7 @@
 #define CIFS_MOUNT_FSCACHE	0x8000 /* local caching enabled */
 #define CIFS_MOUNT_MF_SYMLINKS	0x10000 /* Minshall+French Symlinks enabled */
 #define CIFS_MOUNT_MULTIUSER	0x20000 /* multiuser mount */
+#define CIFS_MOUNT_STRICT_IO	0x40000 /* strict cache mode */
 
 struct cifs_sb_info {
 	struct rb_root tlink_tree;
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 430f510..fc0fd4f 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -44,10 +44,14 @@
 	int charlen, outlen = 0;
 	int maxwords = maxbytes / 2;
 	char tmp[NLS_MAX_CHARSET_SIZE];
+	__u16 ftmp;
 
-	for (i = 0; i < maxwords && from[i]; i++) {
-		charlen = codepage->uni2char(le16_to_cpu(from[i]), tmp,
-					     NLS_MAX_CHARSET_SIZE);
+	for (i = 0; i < maxwords; i++) {
+		ftmp = get_unaligned_le16(&from[i]);
+		if (ftmp == 0)
+			break;
+
+		charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE);
 		if (charlen > 0)
 			outlen += charlen;
 		else
@@ -58,9 +62,9 @@
 }
 
 /*
- * cifs_mapchar - convert a little-endian char to proper char in codepage
+ * cifs_mapchar - convert a host-endian char to proper char in codepage
  * @target - where converted character should be copied
- * @src_char - 2 byte little-endian source character
+ * @src_char - 2 byte host-endian source character
  * @cp - codepage to which character should be converted
  * @mapchar - should character be mapped according to mapchars mount option?
  *
@@ -69,7 +73,7 @@
  * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE).
  */
 static int
-cifs_mapchar(char *target, const __le16 src_char, const struct nls_table *cp,
+cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
 	     bool mapchar)
 {
 	int len = 1;
@@ -82,7 +86,7 @@
 	 *     build_path_from_dentry are modified, as they use slash as
 	 *     separator.
 	 */
-	switch (le16_to_cpu(src_char)) {
+	switch (src_char) {
 	case UNI_COLON:
 		*target = ':';
 		break;
@@ -109,8 +113,7 @@
 	return len;
 
 cp_convert:
-	len = cp->uni2char(le16_to_cpu(src_char), target,
-			   NLS_MAX_CHARSET_SIZE);
+	len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
 	if (len <= 0) {
 		*target = '?';
 		len = 1;
@@ -149,6 +152,7 @@
 	int nullsize = nls_nullsize(codepage);
 	int fromwords = fromlen / 2;
 	char tmp[NLS_MAX_CHARSET_SIZE];
+	__u16 ftmp;
 
 	/*
 	 * because the chars can be of varying widths, we need to take care
@@ -158,19 +162,23 @@
 	 */
 	safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
 
-	for (i = 0; i < fromwords && from[i]; i++) {
+	for (i = 0; i < fromwords; i++) {
+		ftmp = get_unaligned_le16(&from[i]);
+		if (ftmp == 0)
+			break;
+
 		/*
 		 * check to see if converting this character might make the
 		 * conversion bleed into the null terminator
 		 */
 		if (outlen >= safelen) {
-			charlen = cifs_mapchar(tmp, from[i], codepage, mapchar);
+			charlen = cifs_mapchar(tmp, ftmp, codepage, mapchar);
 			if ((outlen + charlen) > (tolen - nullsize))
 				break;
 		}
 
 		/* put converted char into 'to' buffer */
-		charlen = cifs_mapchar(&to[outlen], from[i], codepage, mapchar);
+		charlen = cifs_mapchar(&to[outlen], ftmp, codepage, mapchar);
 		outlen += charlen;
 	}
 
@@ -193,24 +201,21 @@
 {
 	int charlen;
 	int i;
-	wchar_t *wchar_to = (wchar_t *)to; /* needed to quiet sparse */
+	wchar_t wchar_to; /* needed to quiet sparse */
 
 	for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
-
-		/* works for 2.4.0 kernel or later */
-		charlen = codepage->char2uni(from, len, &wchar_to[i]);
+		charlen = codepage->char2uni(from, len, &wchar_to);
 		if (charlen < 1) {
-			cERROR(1, "strtoUCS: char2uni of %d returned %d",
-				(int)*from, charlen);
+			cERROR(1, "strtoUCS: char2uni of 0x%x returned %d",
+				*from, charlen);
 			/* A question mark */
-			to[i] = cpu_to_le16(0x003f);
+			wchar_to = 0x003f;
 			charlen = 1;
-		} else
-			to[i] = cpu_to_le16(wchar_to[i]);
-
+		}
+		put_unaligned_le16(wchar_to, &to[i]);
 	}
 
-	to[i] = 0;
+	put_unaligned_le16(0, &to[i]);
 	return i;
 }
 
@@ -252,3 +257,79 @@
 	return dst;
 }
 
+/*
+ * Convert 16 bit Unicode pathname to wire format from string in current code
+ * page. Conversion may involve remapping the six characters that are
+ * only legal in a POSIX-like OS (if they are present in the string). Path
+ * names are little-endian 16 bit Unicode on the wire.
+ */
+int
+cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
+		 const struct nls_table *cp, int mapChars)
+{
+	int i, j, charlen;
+	int len_remaining = maxlen;
+	char src_char;
+	__u16 temp;
+
+	if (!mapChars)
+		return cifs_strtoUCS(target, source, PATH_MAX, cp);
+
+	for (i = 0, j = 0; i < maxlen; j++) {
+		src_char = source[i];
+		switch (src_char) {
+		case 0:
+			put_unaligned_le16(0, &target[j]);
+			goto ctoUCS_out;
+		case ':':
+			temp = UNI_COLON;
+			break;
+		case '*':
+			temp = UNI_ASTERIK;
+			break;
+		case '?':
+			temp = UNI_QUESTION;
+			break;
+		case '<':
+			temp = UNI_LESSTHAN;
+			break;
+		case '>':
+			temp = UNI_GRTRTHAN;
+			break;
+		case '|':
+			temp = UNI_PIPE;
+			break;
+		/*
+		 * FIXME: We can not handle remapping backslash (UNI_SLASH)
+		 * until all the calls to build_path_from_dentry are modified,
+		 * as they use backslash as separator.
+		 */
+		default:
+			charlen = cp->char2uni(source+i, len_remaining,
+						&temp);
+			/*
+			 * if no match, use question mark, which at least in
+			 * some cases serves as wild card
+			 */
+			if (charlen < 1) {
+				temp = 0x003f;
+				charlen = 1;
+			}
+			len_remaining -= charlen;
+			/*
+			 * character may take more than one byte in the source
+			 * string, but will take exactly two bytes in the
+			 * target string
+			 */
+			i += charlen;
+			continue;
+		}
+		put_unaligned_le16(temp, &target[j]);
+		i++; /* move to next char in source string */
+		len_remaining--;
+	}
+
+ctoUCS_out:
+	return i;
+}
+
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index a437ec3..beeebf1 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -41,9 +41,12 @@
 ;
 
 
-/* security id for everyone */
+/* security id for everyone/world system group */
 static const struct cifs_sid sid_everyone = {
 	1, 1, {0, 0, 0, 0, 0, 1}, {0} };
+/* security id for Authenticated Users system group */
+static const struct cifs_sid sid_authusers = {
+	1, 1, {0, 0, 0, 0, 0, 5}, {11} };
 /* group users */
 static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
 
@@ -365,10 +368,14 @@
 	if (num_aces  > 0) {
 		umode_t user_mask = S_IRWXU;
 		umode_t group_mask = S_IRWXG;
-		umode_t other_mask = S_IRWXO;
+		umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;
 
 		ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
 				GFP_KERNEL);
+		if (!ppace) {
+			cERROR(1, "DACL memory allocation error");
+			return;
+		}
 
 		for (i = 0; i < num_aces; ++i) {
 			ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
@@ -390,6 +397,12 @@
 						     ppace[i]->type,
 						     &fattr->cf_mode,
 						     &other_mask);
+			if (compare_sids(&(ppace[i]->sid), &sid_authusers))
+				access_flags_to_mode(ppace[i]->access_req,
+						     ppace[i]->type,
+						     &fattr->cf_mode,
+						     &other_mask);
+
 
 /*			memcpy((void *)(&(cifscred->aces[i])),
 				(void *)ppace[i],
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 66f3d50..a51585f 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -24,7 +24,6 @@
 #include "cifspdu.h"
 #include "cifsglob.h"
 #include "cifs_debug.h"
-#include "md5.h"
 #include "cifs_unicode.h"
 #include "cifsproto.h"
 #include "ntlmssp.h"
@@ -37,11 +36,6 @@
 /* Note that the smb header signature field on input contains the
 	sequence number before this function is called */
 
-extern void mdfour(unsigned char *out, unsigned char *in, int n);
-extern void E_md4hash(const unsigned char *passwd, unsigned char *p16);
-extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
-		       unsigned char *p24);
-
 static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
 				struct TCP_Server_Info *server, char *signature)
 {
@@ -234,6 +228,7 @@
 /* first calculate 24 bytes ntlm response and then 16 byte session key */
 int setup_ntlm_response(struct cifsSesInfo *ses)
 {
+	int rc = 0;
 	unsigned int temp_len = CIFS_SESS_KEY_SIZE + CIFS_AUTH_RESP_SIZE;
 	char temp_key[CIFS_SESS_KEY_SIZE];
 
@@ -247,13 +242,26 @@
 	}
 	ses->auth_key.len = temp_len;
 
-	SMBNTencrypt(ses->password, ses->server->cryptkey,
+	rc = SMBNTencrypt(ses->password, ses->server->cryptkey,
 			ses->auth_key.response + CIFS_SESS_KEY_SIZE);
+	if (rc) {
+		cFYI(1, "%s Can't generate NTLM response, error: %d",
+			__func__, rc);
+		return rc;
+	}
 
-	E_md4hash(ses->password, temp_key);
-	mdfour(ses->auth_key.response, temp_key, CIFS_SESS_KEY_SIZE);
+	rc = E_md4hash(ses->password, temp_key);
+	if (rc) {
+		cFYI(1, "%s Can't generate NT hash, error: %d", __func__, rc);
+		return rc;
+	}
 
-	return 0;
+	rc = mdfour(ses->auth_key.response, temp_key, CIFS_SESS_KEY_SIZE);
+	if (rc)
+		cFYI(1, "%s Can't generate NTLM session key, error: %d",
+			__func__, rc);
+
+	return rc;
 }
 
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
@@ -649,9 +657,10 @@
 	get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE);
 
 	tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
-	if (!tfm_arc4 || IS_ERR(tfm_arc4)) {
+	if (IS_ERR(tfm_arc4)) {
+		rc = PTR_ERR(tfm_arc4);
 		cERROR(1, "could not allocate crypto API arc4\n");
-		return PTR_ERR(tfm_arc4);
+		return rc;
 	}
 
 	desc.tfm = tfm_arc4;
@@ -700,14 +709,13 @@
 	unsigned int size;
 
 	server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
-	if (!server->secmech.hmacmd5 ||
-			IS_ERR(server->secmech.hmacmd5)) {
+	if (IS_ERR(server->secmech.hmacmd5)) {
 		cERROR(1, "could not allocate crypto hmacmd5\n");
 		return PTR_ERR(server->secmech.hmacmd5);
 	}
 
 	server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
-	if (!server->secmech.md5 || IS_ERR(server->secmech.md5)) {
+	if (IS_ERR(server->secmech.md5)) {
 		cERROR(1, "could not allocate crypto md5\n");
 		rc = PTR_ERR(server->secmech.md5);
 		goto crypto_allocate_md5_fail;
diff --git a/fs/cifs/cifsencrypt.h b/fs/cifs/cifsencrypt.h
deleted file mode 100644
index 15d2ec0..0000000
--- a/fs/cifs/cifsencrypt.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- *   fs/cifs/cifsencrypt.h
- *
- *   Copyright (c) International Business Machines  Corp., 2005
- *   Author(s): Steve French (sfrench@us.ibm.com)
- *
- *   Externs for misc. small encryption routines
- *   so we do not have to put them in cifsproto.h
- *
- *   This library is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU Lesser General Public License as published
- *   by the Free Software Foundation; either version 2.1 of the License, or
- *   (at your option) any later version.
- *
- *   This library is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
- *   the GNU Lesser General Public License for more details.
- *
- *   You should have received a copy of the GNU Lesser General Public License
- *   along with this library; if not, write to the Free Software
- *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-/* md4.c */
-extern void mdfour(unsigned char *out, unsigned char *in, int n);
-/* smbdes.c */
-extern void E_P16(unsigned char *p14, unsigned char *p16);
-extern void E_P24(unsigned char *p21, const unsigned char *c8,
-		  unsigned char *p24);
-
-
-
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index d9f652a..f297013 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -77,7 +77,11 @@
 module_param(cifs_max_pending, int, 0);
 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
 				   "Default: 50 Range: 2 to 256");
-
+unsigned short echo_retries = 5;
+module_param(echo_retries, ushort, 0644);
+MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and "
+			       "reconnecting server. Default: 5. 0 means "
+			       "never reconnect.");
 extern mempool_t *cifs_sm_req_poolp;
 extern mempool_t *cifs_req_poolp;
 extern mempool_t *cifs_mid_poolp;
@@ -596,10 +600,17 @@
 {
 	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
 	ssize_t written;
+	int rc;
 
 	written = generic_file_aio_write(iocb, iov, nr_segs, pos);
-	if (!CIFS_I(inode)->clientCanCacheAll)
-		filemap_fdatawrite(inode->i_mapping);
+
+	if (CIFS_I(inode)->clientCanCacheAll)
+		return written;
+
+	rc = filemap_fdatawrite(inode->i_mapping);
+	if (rc)
+		cFYI(1, "cifs_file_aio_write: %d rc on %p inode", rc, inode);
+
 	return written;
 }
 
@@ -729,6 +740,25 @@
 	.setlease = cifs_setlease,
 };
 
+const struct file_operations cifs_file_strict_ops = {
+	.read = do_sync_read,
+	.write = do_sync_write,
+	.aio_read = cifs_strict_readv,
+	.aio_write = cifs_strict_writev,
+	.open = cifs_open,
+	.release = cifs_close,
+	.lock = cifs_lock,
+	.fsync = cifs_strict_fsync,
+	.flush = cifs_flush,
+	.mmap = cifs_file_strict_mmap,
+	.splice_read = generic_file_splice_read,
+	.llseek = cifs_llseek,
+#ifdef CONFIG_CIFS_POSIX
+	.unlocked_ioctl	= cifs_ioctl,
+#endif /* CONFIG_CIFS_POSIX */
+	.setlease = cifs_setlease,
+};
+
 const struct file_operations cifs_file_direct_ops = {
 	/* no aio, no readv -
 	   BB reevaluate whether they can be done with directio, no cache */
@@ -747,6 +777,7 @@
 	.llseek = cifs_llseek,
 	.setlease = cifs_setlease,
 };
+
 const struct file_operations cifs_file_nobrl_ops = {
 	.read = do_sync_read,
 	.write = do_sync_write,
@@ -765,6 +796,24 @@
 	.setlease = cifs_setlease,
 };
 
+const struct file_operations cifs_file_strict_nobrl_ops = {
+	.read = do_sync_read,
+	.write = do_sync_write,
+	.aio_read = cifs_strict_readv,
+	.aio_write = cifs_strict_writev,
+	.open = cifs_open,
+	.release = cifs_close,
+	.fsync = cifs_strict_fsync,
+	.flush = cifs_flush,
+	.mmap = cifs_file_strict_mmap,
+	.splice_read = generic_file_splice_read,
+	.llseek = cifs_llseek,
+#ifdef CONFIG_CIFS_POSIX
+	.unlocked_ioctl	= cifs_ioctl,
+#endif /* CONFIG_CIFS_POSIX */
+	.setlease = cifs_setlease,
+};
+
 const struct file_operations cifs_file_direct_nobrl_ops = {
 	/* no mmap, no aio, no readv -
 	   BB reevaluate whether they can be done with directio, no cache */
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 851030f..4a33302 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -61,6 +61,7 @@
 		       struct dentry *);
 extern int cifs_revalidate_file(struct file *filp);
 extern int cifs_revalidate_dentry(struct dentry *);
+extern void cifs_invalidate_mapping(struct inode *inode);
 extern int cifs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 extern int cifs_setattr(struct dentry *, struct iattr *);
 
@@ -72,19 +73,27 @@
 /* Functions related to files and directories */
 extern const struct file_operations cifs_file_ops;
 extern const struct file_operations cifs_file_direct_ops; /* if directio mnt */
-extern const struct file_operations cifs_file_nobrl_ops;
-extern const struct file_operations cifs_file_direct_nobrl_ops; /* no brlocks */
+extern const struct file_operations cifs_file_strict_ops; /* if strictio mnt */
+extern const struct file_operations cifs_file_nobrl_ops; /* no brlocks */
+extern const struct file_operations cifs_file_direct_nobrl_ops;
+extern const struct file_operations cifs_file_strict_nobrl_ops;
 extern int cifs_open(struct inode *inode, struct file *file);
 extern int cifs_close(struct inode *inode, struct file *file);
 extern int cifs_closedir(struct inode *inode, struct file *file);
 extern ssize_t cifs_user_read(struct file *file, char __user *read_data,
-			 size_t read_size, loff_t *poffset);
+			      size_t read_size, loff_t *poffset);
+extern ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
+				 unsigned long nr_segs, loff_t pos);
 extern ssize_t cifs_user_write(struct file *file, const char __user *write_data,
-			 size_t write_size, loff_t *poffset);
+			       size_t write_size, loff_t *poffset);
+extern ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
+				  unsigned long nr_segs, loff_t pos);
 extern int cifs_lock(struct file *, int, struct file_lock *);
 extern int cifs_fsync(struct file *, int);
+extern int cifs_strict_fsync(struct file *, int);
 extern int cifs_flush(struct file *, fl_owner_t id);
 extern int cifs_file_mmap(struct file * , struct vm_area_struct *);
+extern int cifs_file_strict_mmap(struct file * , struct vm_area_struct *);
 extern const struct file_operations cifs_dir_ops;
 extern int cifs_dir_open(struct inode *inode, struct file *file);
 extern int cifs_readdir(struct file *file, void *direntry, filldir_t filldir);
@@ -118,5 +127,5 @@
 extern const struct export_operations cifs_export_ops;
 #endif /* EXPERIMENTAL */
 
-#define CIFS_VERSION   "1.68"
+#define CIFS_VERSION   "1.70"
 #endif				/* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 606ca8b..17afb0f 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -161,46 +161,41 @@
 	int srv_count; /* reference counter */
 	/* 15 character server name + 0x20 16th byte indicating type = srv */
 	char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
+	enum statusEnum tcpStatus; /* what we think the status is */
 	char *hostname; /* hostname portion of UNC string */
 	struct socket *ssocket;
 	struct sockaddr_storage dstaddr;
 	struct sockaddr_storage srcaddr; /* locally bind to this IP */
+#ifdef CONFIG_NET_NS
+	struct net *net;
+#endif
 	wait_queue_head_t response_q;
 	wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
 	struct list_head pending_mid_q;
-	void *Server_NlsInfo;	/* BB - placeholder for future NLS info  */
-	unsigned short server_codepage;	/* codepage for the server    */
-	enum protocolEnum protocolType;
-	char versionMajor;
-	char versionMinor;
-	bool svlocal:1;			/* local server or remote */
 	bool noblocksnd;		/* use blocking sendmsg */
 	bool noautotune;		/* do not autotune send buf sizes */
 	bool tcp_nodelay;
 	atomic_t inFlight;  /* number of requests on the wire to server */
-#ifdef CONFIG_CIFS_STATS2
-	atomic_t inSend; /* requests trying to send */
-	atomic_t num_waiters;   /* blocked waiting to get in sendrecv */
-#endif
-	enum statusEnum tcpStatus; /* what we think the status is */
 	struct mutex srv_mutex;
 	struct task_struct *tsk;
 	char server_GUID[16];
 	char secMode;
+	bool session_estab; /* mark when very first sess is established */
+	u16 dialect; /* dialect index that server chose */
 	enum securityEnum secType;
 	unsigned int maxReq;	/* Clients should submit no more */
 	/* than maxReq distinct unanswered SMBs to the server when using  */
 	/* multiplexed reads or writes */
 	unsigned int maxBuf;	/* maxBuf specifies the maximum */
 	/* message size the server can send or receive for non-raw SMBs */
+	/* maxBuf is returned by SMB NegotiateProtocol so maxBuf is only 0 */
+	/* when socket is setup (and during reconnect) before NegProt sent */
 	unsigned int max_rw;	/* maxRw specifies the maximum */
 	/* message size the server can send or receive for */
 	/* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */
 	unsigned int max_vcs;	/* maximum number of smb sessions, at least
 				   those that can be specified uniquely with
 				   vcnumbers */
-	char sessid[4];		/* unique token id for this session */
-	/* (returned on Negotiate */
 	int capabilities; /* allow selective disabling of caps by smb sess */
 	int timeAdj;  /* Adjust for difference in server time zone in sec */
 	__u16 CurrentMid;         /* multiplex id - rotating counter */
@@ -210,20 +205,53 @@
 	__u32 sequence_number; /* for signing, protected by srv_mutex */
 	struct session_key session_key;
 	unsigned long lstrp; /* when we got last response from this server */
-	u16 dialect; /* dialect index that server chose */
 	struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
 	/* extended security flavors that server supports */
+	bool	sec_ntlmssp;		/* supports NTLMSSP */
+	bool	sec_kerberosu2u;	/* supports U2U Kerberos */
 	bool	sec_kerberos;		/* supports plain Kerberos */
 	bool	sec_mskerberos;		/* supports legacy MS Kerberos */
-	bool	sec_kerberosu2u;	/* supports U2U Kerberos */
-	bool	sec_ntlmssp;		/* supports NTLMSSP */
-	bool session_estab; /* mark when very first sess is established */
+	struct delayed_work	echo; /* echo ping workqueue job */
 #ifdef CONFIG_CIFS_FSCACHE
 	struct fscache_cookie   *fscache; /* client index cache cookie */
 #endif
+#ifdef CONFIG_CIFS_STATS2
+	atomic_t inSend; /* requests trying to send */
+	atomic_t num_waiters;   /* blocked waiting to get in sendrecv */
+#endif
 };
 
 /*
+ * Macros to allow the TCP_Server_Info->net field and related code to drop out
+ * when CONFIG_NET_NS isn't set.
+ */
+
+#ifdef CONFIG_NET_NS
+
+static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv)
+{
+	return srv->net;
+}
+
+static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
+{
+	srv->net = net;
+}
+
+#else
+
+static inline struct net *cifs_net_ns(struct TCP_Server_Info *srv)
+{
+	return &init_net;
+}
+
+static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
+{
+}
+
+#endif
+
+/*
  * Session structure.  One of these for each uid session with a particular host
  */
 struct cifsSesInfo {
@@ -446,11 +474,11 @@
 	/* BB add in lists for dirty pages i.e. write caching info for oplock */
 	struct list_head openFileList;
 	__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
-	unsigned long time;	/* jiffies of last update/check of inode */
-	bool clientCanCacheRead:1;	/* read oplock */
-	bool clientCanCacheAll:1;	/* read and writebehind oplock */
-	bool delete_pending:1;		/* DELETE_ON_CLOSE is set */
-	bool invalid_mapping:1;		/* pagecache is invalid */
+	bool clientCanCacheRead;	/* read oplock */
+	bool clientCanCacheAll;		/* read and writebehind oplock */
+	bool delete_pending;		/* DELETE_ON_CLOSE is set */
+	bool invalid_mapping;		/* pagecache is invalid */
+	unsigned long time;		/* jiffies of last update of inode */
 	u64  server_eof;		/* current file size on server */
 	u64  uniqueid;			/* server inode number */
 	u64  createtime;		/* creation time on server */
@@ -508,6 +536,18 @@
 
 #endif
 
+struct mid_q_entry;
+
+/*
+ * This is the prototype for the mid callback function. When creating one,
+ * take special care to avoid deadlocks. Things to bear in mind:
+ *
+ * - it will be called by cifsd
+ * - the GlobalMid_Lock will be held
+ * - the mid will be removed from the pending_mid_q list
+ */
+typedef void (mid_callback_t)(struct mid_q_entry *mid);
+
 /* one of these for every pending CIFS request to the server */
 struct mid_q_entry {
 	struct list_head qhead;	/* mids waiting on reply from this server */
@@ -519,7 +559,8 @@
 	unsigned long when_sent; /* time when smb send finished */
 	unsigned long when_received; /* when demux complete (taken off wire) */
 #endif
-	struct task_struct *tsk;	/* task waiting for response */
+	mid_callback_t *callback; /* call completion callback */
+	void *callback_data;	  /* general purpose pointer for callback */
 	struct smb_hdr *resp_buf;	/* response buffer */
 	int midState;	/* wish this were enum but can not pass to wait_event */
 	__u8 command;	/* smb command code */
@@ -613,7 +654,7 @@
 #define   MID_REQUEST_SUBMITTED 2
 #define   MID_RESPONSE_RECEIVED 4
 #define   MID_RETRY_NEEDED      8 /* session closed while this request out */
-#define   MID_NO_RESP_NEEDED 0x10
+#define   MID_RESPONSE_MALFORMED 0x10
 
 /* Types of response buffer returned from SendReceive2 */
 #define   CIFS_NO_BUFFER        0    /* Response buffer not returned */
@@ -622,12 +663,9 @@
 #define   CIFS_IOVEC            4    /* array of response buffers */
 
 /* Type of Request to SendReceive2 */
-#define   CIFS_STD_OP	        0    /* normal request timeout */
-#define   CIFS_LONG_OP          1    /* long op (up to 45 sec, oplock time) */
-#define   CIFS_VLONG_OP         2    /* sloow op - can take up to 180 seconds */
-#define   CIFS_BLOCKING_OP      4    /* operation can block */
-#define   CIFS_ASYNC_OP         8    /* do not wait for response */
-#define   CIFS_TIMEOUT_MASK 0x00F    /* only one of 5 above set in req */
+#define   CIFS_BLOCKING_OP      1    /* operation can block */
+#define   CIFS_ASYNC_OP         2    /* do not wait for response */
+#define   CIFS_TIMEOUT_MASK 0x003    /* only one of above set in req */
 #define   CIFS_LOG_ERROR    0x010    /* log NT STATUS if non-zero */
 #define   CIFS_LARGE_BUF_OP 0x020    /* large request buffer */
 #define   CIFS_NO_RESP      0x040    /* no response buffer required */
@@ -790,6 +828,9 @@
 GLOBAL_EXTERN unsigned int cifs_min_small;  /* min size of small buf pool */
 GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
 
+/* reconnect after this many failed echo attempts */
+GLOBAL_EXTERN unsigned short echo_retries;
+
 void cifs_oplock_break(struct work_struct *work);
 void cifs_oplock_break_get(struct cifsFileInfo *cfile);
 void cifs_oplock_break_put(struct cifsFileInfo *cfile);
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index de36b09..b5c8cc5 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -23,6 +23,7 @@
 #define _CIFSPDU_H
 
 #include <net/sock.h>
+#include <asm/unaligned.h>
 #include "smbfsctl.h"
 
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
@@ -50,6 +51,7 @@
 #define SMB_COM_SETATTR               0x09 /* trivial response */
 #define SMB_COM_LOCKING_ANDX          0x24 /* trivial response */
 #define SMB_COM_COPY                  0x29 /* trivial rsp, fail filename ignrd*/
+#define SMB_COM_ECHO                  0x2B /* echo request */
 #define SMB_COM_OPEN_ANDX             0x2D /* Legacy open for old servers */
 #define SMB_COM_READ_ANDX             0x2E
 #define SMB_COM_WRITE_ANDX            0x2F
@@ -425,11 +427,49 @@
 	__u16 Mid;
 	__u8 WordCount;
 } __attribute__((packed));
-/* given a pointer to an smb_hdr retrieve the value of byte count */
-#define BCC(smb_var) (*(__u16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount)))
-#define BCC_LE(smb_var) (*(__le16 *)((char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount)))
+
+/* given a pointer to an smb_hdr retrieve a char pointer to the byte count */
+#define BCC(smb_var) ((unsigned char *)(smb_var) + sizeof(struct smb_hdr) + \
+			 (2 * (smb_var)->WordCount))
+
 /* given a pointer to an smb_hdr retrieve the pointer to the byte area */
-#define pByteArea(smb_var) ((unsigned char *)(smb_var) + sizeof(struct smb_hdr) + (2 * (smb_var)->WordCount) + 2)
+#define pByteArea(smb_var) (BCC(smb_var) + 2)
+
+/* get the converted ByteCount for a SMB packet and return it */
+static inline __u16
+get_bcc(struct smb_hdr *hdr)
+{
+	__u16 *bc_ptr = (__u16 *)BCC(hdr);
+
+	return get_unaligned(bc_ptr);
+}
+
+/* get the unconverted ByteCount for a SMB packet and return it */
+static inline __u16
+get_bcc_le(struct smb_hdr *hdr)
+{
+	__le16 *bc_ptr = (__le16 *)BCC(hdr);
+
+	return get_unaligned_le16(bc_ptr);
+}
+
+/* set the ByteCount for a SMB packet in host-byte order */
+static inline void
+put_bcc(__u16 count, struct smb_hdr *hdr)
+{
+	__u16 *bc_ptr = (__u16 *)BCC(hdr);
+
+	put_unaligned(count, bc_ptr);
+}
+
+/* set the ByteCount for a SMB packet in little-endian */
+static inline void
+put_bcc_le(__u16 count, struct smb_hdr *hdr)
+{
+	__le16 *bc_ptr = (__le16 *)BCC(hdr);
+
+	put_unaligned_le16(count, bc_ptr);
+}
 
 /*
  * Computer Name Length (since Netbios name was length 16 with last byte 0x20)
@@ -760,6 +800,20 @@
  *
  */
 
+typedef struct smb_com_echo_req {
+	struct	smb_hdr hdr;
+	__le16	EchoCount;
+	__le16	ByteCount;
+	char	Data[1];
+} __attribute__((packed)) ECHO_REQ;
+
+typedef struct smb_com_echo_rsp {
+	struct	smb_hdr hdr;
+	__le16	SequenceNumber;
+	__le16	ByteCount;
+	char	Data[1];
+} __attribute__((packed)) ECHO_RSP;
+
 typedef struct smb_com_logoff_andx_req {
 	struct smb_hdr hdr;	/* wct = 2 */
 	__u8 AndXCommand;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index e6d1481..8096f27 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -61,6 +61,12 @@
 		const char *fullpath, const struct dfs_info3_param *ref,
 		char **devname);
 /* extern void renew_parental_timestamps(struct dentry *direntry);*/
+extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
+					struct TCP_Server_Info *server);
+extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
+extern int cifs_call_async(struct TCP_Server_Info *server,
+			   struct smb_hdr *in_buf, mid_callback_t *callback,
+			   void *cbdata);
 extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
 			struct smb_hdr * /* input */ ,
 			struct smb_hdr * /* out */ ,
@@ -79,6 +85,8 @@
 extern bool is_valid_oplock_break(struct smb_hdr *smb,
 				  struct TCP_Server_Info *);
 extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
+extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
+			    unsigned int bytes_written);
 extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool);
 extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
 extern unsigned int smbCalcSize(struct smb_hdr *ptr);
@@ -347,12 +355,13 @@
 			const __u16 netfid, const __u64 len,
 			const __u64 offset, const __u32 numUnlock,
 			const __u32 numLock, const __u8 lockType,
-			const bool waitFlag);
+			const bool waitFlag, const __u8 oplock_level);
 extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
 			const __u16 smb_file_id, const int get_flag,
 			const __u64 len, struct file_lock *,
 			const __u16 lock_type, const bool waitFlag);
 extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon);
+extern int CIFSSMBEcho(struct TCP_Server_Info *server);
 extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);
 
 extern struct cifsSesInfo *sesInfoAlloc(void);
@@ -366,7 +375,7 @@
 extern int cifs_verify_signature(struct smb_hdr *,
 				 struct TCP_Server_Info *server,
 				__u32 expected_sequence_number);
-extern void SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *);
+extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *);
 extern int setup_ntlm_response(struct cifsSesInfo *);
 extern int setup_ntlmv2_rsp(struct cifsSesInfo *, const struct nls_table *);
 extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *);
@@ -416,4 +425,11 @@
 extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr,
 		const unsigned char *path,
 		struct cifs_sb_info *cifs_sb, int xid);
+extern int mdfour(unsigned char *, unsigned char *, int);
+extern int E_md4hash(const unsigned char *passwd, unsigned char *p16);
+extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
+			unsigned char *p24);
+extern void E_P16(unsigned char *p14, unsigned char *p16);
+extern void E_P24(unsigned char *p21, const unsigned char *c8,
+			unsigned char *p24);
 #endif			/* _CIFSPROTO_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 2f6795e..904aa47 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -136,9 +136,6 @@
 		}
 	}
 
-	if (ses->status == CifsExiting)
-		return -EIO;
-
 	/*
 	 * Give demultiplex thread up to 10 seconds to reconnect, should be
 	 * greater than cifs socket timeout which is 7 seconds
@@ -156,7 +153,7 @@
 		 * retrying until process is killed or server comes
 		 * back on-line
 		 */
-		if (!tcon->retry || ses->status == CifsExiting) {
+		if (!tcon->retry) {
 			cFYI(1, "gave up waiting on reconnect in smb_init");
 			return -EHOSTDOWN;
 		}
@@ -331,37 +328,35 @@
 
 static int validate_t2(struct smb_t2_rsp *pSMB)
 {
-	int rc = -EINVAL;
-	int total_size;
-	char *pBCC;
+	unsigned int total_size;
 
-	/* check for plausible wct, bcc and t2 data and parm sizes */
+	/* check for plausible wct */
+	if (pSMB->hdr.WordCount < 10)
+		goto vt2_err;
+
 	/* check for parm and data offset going beyond end of smb */
-	if (pSMB->hdr.WordCount >= 10) {
-		if ((le16_to_cpu(pSMB->t2_rsp.ParameterOffset) <= 1024) &&
-		   (le16_to_cpu(pSMB->t2_rsp.DataOffset) <= 1024)) {
-			/* check that bcc is at least as big as parms + data */
-			/* check that bcc is less than negotiated smb buffer */
-			total_size = le16_to_cpu(pSMB->t2_rsp.ParameterCount);
-			if (total_size < 512) {
-				total_size +=
-					le16_to_cpu(pSMB->t2_rsp.DataCount);
-				/* BCC le converted in SendReceive */
-				pBCC = (pSMB->hdr.WordCount * 2) +
-					sizeof(struct smb_hdr) +
-					(char *)pSMB;
-				if ((total_size <= (*(u16 *)pBCC)) &&
-				   (total_size <
-					CIFSMaxBufSize+MAX_CIFS_HDR_SIZE)) {
-					return 0;
-				}
-			}
-		}
-	}
+	if (get_unaligned_le16(&pSMB->t2_rsp.ParameterOffset) > 1024 ||
+	    get_unaligned_le16(&pSMB->t2_rsp.DataOffset) > 1024)
+		goto vt2_err;
+
+	/* check that bcc is at least as big as parms + data */
+	/* check that bcc is less than negotiated smb buffer */
+	total_size = get_unaligned_le16(&pSMB->t2_rsp.ParameterCount);
+	if (total_size >= 512)
+		goto vt2_err;
+
+	total_size += get_unaligned_le16(&pSMB->t2_rsp.DataCount);
+	if (total_size > get_bcc(&pSMB->hdr) ||
+	    total_size >= CIFSMaxBufSize + MAX_CIFS_HDR_SIZE)
+		goto vt2_err;
+
+	return 0;
+vt2_err:
 	cifs_dump_mem("Invalid transact2 SMB: ", (char *)pSMB,
 		sizeof(struct smb_t2_rsp) + 16);
-	return rc;
+	return -EINVAL;
 }
+
 int
 CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
 {
@@ -452,7 +447,6 @@
 		server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize),
 				(__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
 		server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
-		GETU32(server->sessid) = le32_to_cpu(rsp->SessionKey);
 		/* even though we do not use raw we might as well set this
 		accurately, in case we ever find a need for it */
 		if ((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
@@ -566,7 +560,6 @@
 			(__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
 	server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
 	cFYI(DBG2, "Max buf = %d", ses->server->maxBuf);
-	GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
 	server->capabilities = le32_to_cpu(pSMBr->Capabilities);
 	server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
 	server->timeAdj *= 60;
@@ -706,6 +699,53 @@
 	return rc;
 }
 
+/*
+ * This is a no-op for now. We're not really interested in the reply, but
+ * rather in the fact that the server sent one and that server->lstrp
+ * gets updated.
+ *
+ * FIXME: maybe we should consider checking that the reply matches request?
+ */
+static void
+cifs_echo_callback(struct mid_q_entry *mid)
+{
+	struct TCP_Server_Info *server = mid->callback_data;
+
+	DeleteMidQEntry(mid);
+	atomic_dec(&server->inFlight);
+	wake_up(&server->request_q);
+}
+
+int
+CIFSSMBEcho(struct TCP_Server_Info *server)
+{
+	ECHO_REQ *smb;
+	int rc = 0;
+
+	cFYI(1, "In echo request");
+
+	rc = small_smb_init(SMB_COM_ECHO, 0, NULL, (void **)&smb);
+	if (rc)
+		return rc;
+
+	/* set up echo request */
+	smb->hdr.Tid = cpu_to_le16(0xffff);
+	smb->hdr.WordCount = 1;
+	put_unaligned_le16(1, &smb->EchoCount);
+	put_bcc_le(1, &smb->hdr);
+	smb->Data[0] = 'a';
+	smb->hdr.smb_buf_length += 3;
+
+	rc = cifs_call_async(server, (struct smb_hdr *)smb,
+				cifs_echo_callback, server);
+	if (rc)
+		cFYI(1, "Echo request failed: %d", rc);
+
+	cifs_small_buf_release(smb);
+
+	return rc;
+}
+
 int
 CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
 {
@@ -1193,7 +1233,7 @@
 	pSMB->ByteCount = cpu_to_le16(count);
 	/* long_op set to 1 to allow for oplock break timeouts */
 	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
-			(struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
+			(struct smb_hdr *)pSMBr, &bytes_returned, 0);
 	cifs_stats_inc(&tcon->num_opens);
 	if (rc) {
 		cFYI(1, "Error in Open = %d", rc);
@@ -1306,7 +1346,7 @@
 	pSMB->ByteCount = cpu_to_le16(count);
 	/* long_op set to 1 to allow for oplock break timeouts */
 	rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
-			(struct smb_hdr *)pSMBr, &bytes_returned, CIFS_LONG_OP);
+			(struct smb_hdr *)pSMBr, &bytes_returned, 0);
 	cifs_stats_inc(&tcon->num_opens);
 	if (rc) {
 		cFYI(1, "Error in Open = %d", rc);
@@ -1388,7 +1428,7 @@
 	iov[0].iov_base = (char *)pSMB;
 	iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
 	rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovecs */,
-			 &resp_buf_type, CIFS_STD_OP | CIFS_LOG_ERROR);
+			 &resp_buf_type, CIFS_LOG_ERROR);
 	cifs_stats_inc(&tcon->num_reads);
 	pSMBr = (READ_RSP *)iov[0].iov_base;
 	if (rc) {
@@ -1663,7 +1703,8 @@
 CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
 	    const __u16 smb_file_id, const __u64 len,
 	    const __u64 offset, const __u32 numUnlock,
-	    const __u32 numLock, const __u8 lockType, const bool waitFlag)
+	    const __u32 numLock, const __u8 lockType,
+	    const bool waitFlag, const __u8 oplock_level)
 {
 	int rc = 0;
 	LOCK_REQ *pSMB = NULL;
@@ -1691,6 +1732,7 @@
 	pSMB->NumberOfLocks = cpu_to_le16(numLock);
 	pSMB->NumberOfUnlocks = cpu_to_le16(numUnlock);
 	pSMB->LockType = lockType;
+	pSMB->OplockLevel = oplock_level;
 	pSMB->AndXCommand = 0xFF;	/* none */
 	pSMB->Fid = smb_file_id; /* netfid stays le */
 
@@ -3087,7 +3129,7 @@
 	iov[0].iov_len = pSMB->hdr.smb_buf_length + 4;
 
 	rc = SendReceive2(xid, tcon->ses, iov, 1 /* num iovec */, &buf_type,
-			 CIFS_STD_OP);
+			 0);
 	cifs_stats_inc(&tcon->num_acl_get);
 	if (rc) {
 		cFYI(1, "Send error in QuerySecDesc = %d", rc);
@@ -4869,7 +4911,6 @@
 		   __u16 fid, __u32 pid_of_opener, bool SetAllocation)
 {
 	struct smb_com_transaction2_sfi_req *pSMB  = NULL;
-	char *data_offset;
 	struct file_end_of_file_info *parm_data;
 	int rc = 0;
 	__u16 params, param_offset, offset, byte_count, count;
@@ -4893,8 +4934,6 @@
 	param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
 	offset = param_offset + params;
 
-	data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
-
 	count = sizeof(struct file_end_of_file_info);
 	pSMB->MaxParameterCount = cpu_to_le16(2);
 	/* BB find exact max SMB PDU from sess structure BB */
@@ -5562,7 +5601,7 @@
 	}
 
 	/* make sure list_len doesn't go past end of SMB */
-	end_of_smb = (char *)pByteArea(&pSMBr->hdr) + BCC(&pSMBr->hdr);
+	end_of_smb = (char *)pByteArea(&pSMBr->hdr) + get_bcc(&pSMBr->hdr);
 	if ((char *)ea_response_data + list_len > end_of_smb) {
 		cFYI(1, "EA list appears to go beyond SMB");
 		rc = -EIO;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9f59887..8d6c17a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -52,8 +52,8 @@
 #define CIFS_PORT 445
 #define RFC1001_PORT 139
 
-extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
-			 unsigned char *p24);
+/* SMB echo "timeout" -- FIXME: tunable? */
+#define SMB_ECHO_INTERVAL (60 * HZ)
 
 extern mempool_t *cifs_req_poolp;
 
@@ -84,6 +84,7 @@
 	bool no_xattr:1;   /* set if xattr (EA) support should be disabled*/
 	bool server_ino:1; /* use inode numbers from server ie UniqueId */
 	bool direct_io:1;
+	bool strict_io:1; /* strict cache behavior */
 	bool remap:1;      /* set to remap seven reserved chars in filenames */
 	bool posix_paths:1; /* unset to not ask for posix pathnames. */
 	bool no_linux_ext:1;
@@ -152,6 +153,7 @@
 
 	/* before reconnecting the tcp session, mark the smb session (uid)
 		and the tid bad so they are not used until reconnected */
+	cFYI(1, "%s: marking sessions and tcons for reconnect", __func__);
 	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each(tmp, &server->smb_ses_list) {
 		ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
@@ -163,7 +165,9 @@
 		}
 	}
 	spin_unlock(&cifs_tcp_ses_lock);
+
 	/* do not want to be sending data on a socket we are freeing */
+	cFYI(1, "%s: tearing down socket", __func__);
 	mutex_lock(&server->srv_mutex);
 	if (server->ssocket) {
 		cFYI(1, "State: 0x%x Flags: 0x%lx", server->ssocket->state,
@@ -180,22 +184,20 @@
 	kfree(server->session_key.response);
 	server->session_key.response = NULL;
 	server->session_key.len = 0;
+	server->lstrp = jiffies;
+	mutex_unlock(&server->srv_mutex);
 
+	/* mark submitted MIDs for retry and issue callback */
+	cFYI(1, "%s: issuing mid callbacks", __func__);
 	spin_lock(&GlobalMid_Lock);
-	list_for_each(tmp, &server->pending_mid_q) {
-		mid_entry = list_entry(tmp, struct
-					mid_q_entry,
-					qhead);
-		if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
-				/* Mark other intransit requests as needing
-				   retry so we do not immediately mark the
-				   session bad again (ie after we reconnect
-				   below) as they timeout too */
+	list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
+		mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+		if (mid_entry->midState == MID_REQUEST_SUBMITTED)
 			mid_entry->midState = MID_RETRY_NEEDED;
-		}
+		list_del_init(&mid_entry->qhead);
+		mid_entry->callback(mid_entry);
 	}
 	spin_unlock(&GlobalMid_Lock);
-	mutex_unlock(&server->srv_mutex);
 
 	while ((server->tcpStatus != CifsExiting) &&
 	       (server->tcpStatus != CifsGood)) {
@@ -212,10 +214,9 @@
 			if (server->tcpStatus != CifsExiting)
 				server->tcpStatus = CifsGood;
 			spin_unlock(&GlobalMid_Lock);
-	/*		atomic_set(&server->inFlight,0);*/
-			wake_up(&server->response_q);
 		}
 	}
+
 	return rc;
 }
 
@@ -229,9 +230,8 @@
 static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
 {
 	struct smb_t2_rsp *pSMBt;
-	int total_data_size;
-	int data_in_this_rsp;
 	int remaining;
+	__u16 total_data_size, data_in_this_rsp;
 
 	if (pSMB->Command != SMB_COM_TRANSACTION2)
 		return 0;
@@ -245,8 +245,8 @@
 
 	pSMBt = (struct smb_t2_rsp *)pSMB;
 
-	total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount);
-	data_in_this_rsp = le16_to_cpu(pSMBt->t2_rsp.DataCount);
+	total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
+	data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
 
 	remaining = total_data_size - data_in_this_rsp;
 
@@ -272,21 +272,18 @@
 {
 	struct smb_t2_rsp *pSMB2 = (struct smb_t2_rsp *)psecond;
 	struct smb_t2_rsp *pSMBt  = (struct smb_t2_rsp *)pTargetSMB;
-	int total_data_size;
-	int total_in_buf;
-	int remaining;
-	int total_in_buf2;
 	char *data_area_of_target;
 	char *data_area_of_buf2;
-	__u16 byte_count;
+	int remaining;
+	__u16 byte_count, total_data_size, total_in_buf, total_in_buf2;
 
-	total_data_size = le16_to_cpu(pSMBt->t2_rsp.TotalDataCount);
+	total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
 
-	if (total_data_size != le16_to_cpu(pSMB2->t2_rsp.TotalDataCount)) {
+	if (total_data_size !=
+	    get_unaligned_le16(&pSMB2->t2_rsp.TotalDataCount))
 		cFYI(1, "total data size of primary and secondary t2 differ");
-	}
 
-	total_in_buf = le16_to_cpu(pSMBt->t2_rsp.DataCount);
+	total_in_buf = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
 
 	remaining = total_data_size - total_in_buf;
 
@@ -296,28 +293,28 @@
 	if (remaining == 0) /* nothing to do, ignore */
 		return 0;
 
-	total_in_buf2 = le16_to_cpu(pSMB2->t2_rsp.DataCount);
+	total_in_buf2 = get_unaligned_le16(&pSMB2->t2_rsp.DataCount);
 	if (remaining < total_in_buf2) {
 		cFYI(1, "transact2 2nd response contains too much data");
 	}
 
 	/* find end of first SMB data area */
 	data_area_of_target = (char *)&pSMBt->hdr.Protocol +
-				le16_to_cpu(pSMBt->t2_rsp.DataOffset);
+				get_unaligned_le16(&pSMBt->t2_rsp.DataOffset);
 	/* validate target area */
 
-	data_area_of_buf2 = (char *) &pSMB2->hdr.Protocol +
-					le16_to_cpu(pSMB2->t2_rsp.DataOffset);
+	data_area_of_buf2 = (char *)&pSMB2->hdr.Protocol +
+				get_unaligned_le16(&pSMB2->t2_rsp.DataOffset);
 
 	data_area_of_target += total_in_buf;
 
 	/* copy second buffer into end of first buffer */
 	memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
 	total_in_buf += total_in_buf2;
-	pSMBt->t2_rsp.DataCount = cpu_to_le16(total_in_buf);
-	byte_count = le16_to_cpu(BCC_LE(pTargetSMB));
+	put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount);
+	byte_count = get_bcc_le(pTargetSMB);
 	byte_count += total_in_buf2;
-	BCC_LE(pTargetSMB) = cpu_to_le16(byte_count);
+	put_bcc_le(byte_count, pTargetSMB);
 
 	byte_count = pTargetSMB->smb_buf_length;
 	byte_count += total_in_buf2;
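The hunks above replace le16_to_cpu() on transact2 response fields with get_unaligned_le16() on their addresses, because those fields are not guaranteed to be naturally aligned inside the receive buffer. As a rough illustration (not the kernel helper itself), an alignment-safe little-endian 16-bit read can be done byte-wise:

/* Illustrative sketch: byte-wise little-endian 16-bit read that is safe
 * regardless of the pointer's alignment. */
static unsigned short read_le16(const unsigned char *p)
{
	return (unsigned short)p[0] | ((unsigned short)p[1] << 8);
}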
@@ -331,7 +328,31 @@
 		return 0; /* we are done */
 	} else /* more responses to go */
 		return 1;
+}
 
+static void
+cifs_echo_request(struct work_struct *work)
+{
+	int rc;
+	struct TCP_Server_Info *server = container_of(work,
+					struct TCP_Server_Info, echo.work);
+
+	/*
+	 * We cannot send an echo until the NEGOTIATE_PROTOCOL request is
+	 * done, which is indicated by maxBuf != 0. Also, no need to ping if
+	 * we got a response recently
+	 */
+	if (server->maxBuf == 0 ||
+	    time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
+		goto requeue_echo;
+
+	rc = CIFSSMBEcho(server);
+	if (rc)
+		cFYI(1, "Unable to send echo request to server: %s",
+			server->hostname);
+
+requeue_echo:
+	queue_delayed_work(system_nrt_wq, &server->echo, SMB_ECHO_INTERVAL);
 }
 
 static int
@@ -345,8 +366,7 @@
 	struct msghdr smb_msg;
 	struct kvec iov;
 	struct socket *csocket = server->ssocket;
-	struct list_head *tmp;
-	struct cifsSesInfo *ses;
+	struct list_head *tmp, *tmp2;
 	struct task_struct *task_to_wake = NULL;
 	struct mid_q_entry *mid_entry;
 	char temp;
@@ -399,7 +419,20 @@
 		smb_msg.msg_control = NULL;
 		smb_msg.msg_controllen = 0;
 		pdu_length = 4; /* enough to get RFC1001 header */
+
 incomplete_rcv:
+		if (echo_retries > 0 &&
+		    time_after(jiffies, server->lstrp +
+					(echo_retries * SMB_ECHO_INTERVAL))) {
+			cERROR(1, "Server %s has not responded in %d seconds. "
+				  "Reconnecting...", server->hostname,
+				  (echo_retries * SMB_ECHO_INTERVAL / HZ));
+			cifs_reconnect(server);
+			csocket = server->ssocket;
+			wake_up(&server->response_q);
+			continue;
+		}
+
 		length =
 		    kernel_recvmsg(csocket, &smb_msg,
 				&iov, 1, pdu_length, 0 /* BB other flags? */);
@@ -550,25 +583,36 @@
 		else if (reconnect == 1)
 			continue;
 
-		length += 4; /* account for rfc1002 hdr */
+		total_read += 4; /* account for rfc1002 hdr */
 
+		dump_smb(smb_buffer, total_read);
 
-		dump_smb(smb_buffer, length);
-		if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) {
-			cifs_dump_mem("Bad SMB: ", smb_buffer, 48);
-			continue;
-		}
+		/*
+		 * We know that we received enough to get to the MID as we
+		 * checked the pdu_length earlier. Now check to see
+		 * if the rest of the header is OK. We borrow the length
+		 * var for the rest of the loop to avoid a new stack var.
+		 *
+		 * 48 bytes is enough to display the header and a little bit
+		 * into the payload for debugging purposes.
+		 */
+		length = checkSMB(smb_buffer, smb_buffer->Mid, total_read);
+		if (length != 0)
+			cifs_dump_mem("Bad SMB: ", smb_buffer,
+					min_t(unsigned int, total_read, 48));
 
+		mid_entry = NULL;
+		server->lstrp = jiffies;
 
-		task_to_wake = NULL;
 		spin_lock(&GlobalMid_Lock);
-		list_for_each(tmp, &server->pending_mid_q) {
+		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
 			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
 
 			if ((mid_entry->mid == smb_buffer->Mid) &&
 			    (mid_entry->midState == MID_REQUEST_SUBMITTED) &&
 			    (mid_entry->command == smb_buffer->Command)) {
-				if (check2ndT2(smb_buffer,server->maxBuf) > 0) {
+				if (length == 0 &&
+				   check2ndT2(smb_buffer, server->maxBuf) > 0) {
 					/* We have a multipart transact2 resp */
 					isMultiRsp = true;
 					if (mid_entry->resp_buf) {
@@ -603,20 +647,24 @@
 				mid_entry->resp_buf = smb_buffer;
 				mid_entry->largeBuf = isLargeBuf;
 multi_t2_fnd:
-				task_to_wake = mid_entry->tsk;
-				mid_entry->midState = MID_RESPONSE_RECEIVED;
+				if (length == 0)
+					mid_entry->midState =
+							MID_RESPONSE_RECEIVED;
+				else
+					mid_entry->midState =
+							MID_RESPONSE_MALFORMED;
 #ifdef CONFIG_CIFS_STATS2
 				mid_entry->when_received = jiffies;
 #endif
-				/* so we do not time out requests to  server
-				which is still responding (since server could
-				be busy but not dead) */
-				server->lstrp = jiffies;
+				list_del_init(&mid_entry->qhead);
+				mid_entry->callback(mid_entry);
 				break;
 			}
+			mid_entry = NULL;
 		}
 		spin_unlock(&GlobalMid_Lock);
-		if (task_to_wake) {
+
+		if (mid_entry != NULL) {
 			/* Was previous buf put in mpx struct for multi-rsp? */
 			if (!isMultiRsp) {
 				/* smb buffer will be freed by user thread */
@@ -625,11 +673,13 @@
 				else
 					smallbuf = NULL;
 			}
-			wake_up_process(task_to_wake);
+		} else if (length != 0) {
+			/* response sanity checks failed */
+			continue;
 		} else if (!is_valid_oplock_break(smb_buffer, server) &&
 			   !isMultiRsp) {
 			cERROR(1, "No task to wake, unknown frame received! "
-				   "NumMids %d", midCount.counter);
+				   "NumMids %d", atomic_read(&midCount));
 			cifs_dump_mem("Received Data is: ", (char *)smb_buffer,
 				      sizeof(struct smb_hdr));
 #ifdef CONFIG_CIFS_DEBUG2
@@ -677,44 +727,16 @@
 	if (smallbuf) /* no sense logging a debug message if NULL */
 		cifs_small_buf_release(smallbuf);
 
-	/*
-	 * BB: we shouldn't have to do any of this. It shouldn't be
-	 * possible to exit from the thread with active SMB sessions
-	 */
-	spin_lock(&cifs_tcp_ses_lock);
-	if (list_empty(&server->pending_mid_q)) {
-		/* loop through server session structures attached to this and
-		    mark them dead */
-		list_for_each(tmp, &server->smb_ses_list) {
-			ses = list_entry(tmp, struct cifsSesInfo,
-					 smb_ses_list);
-			ses->status = CifsExiting;
-			ses->server = NULL;
-		}
-		spin_unlock(&cifs_tcp_ses_lock);
-	} else {
-		/* although we can not zero the server struct pointer yet,
-		since there are active requests which may depnd on them,
-		mark the corresponding SMB sessions as exiting too */
-		list_for_each(tmp, &server->smb_ses_list) {
-			ses = list_entry(tmp, struct cifsSesInfo,
-					 smb_ses_list);
-			ses->status = CifsExiting;
-		}
-
+	if (!list_empty(&server->pending_mid_q)) {
 		spin_lock(&GlobalMid_Lock);
-		list_for_each(tmp, &server->pending_mid_q) {
-		mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-			if (mid_entry->midState == MID_REQUEST_SUBMITTED) {
-				cFYI(1, "Clearing Mid 0x%x - waking up ",
+		list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
+			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+			cFYI(1, "Clearing Mid 0x%x - issuing callback",
 					 mid_entry->mid);
-				task_to_wake = mid_entry->tsk;
-				if (task_to_wake)
-					wake_up_process(task_to_wake);
-			}
+			list_del_init(&mid_entry->qhead);
+			mid_entry->callback(mid_entry);
 		}
 		spin_unlock(&GlobalMid_Lock);
-		spin_unlock(&cifs_tcp_ses_lock);
 		/* 1/8th of sec is more than enough time for them to exit */
 		msleep(125);
 	}
@@ -732,18 +754,6 @@
 		coming home not much else we can do but free the memory */
 	}
 
-	/* last chance to mark ses pointers invalid
-	if there are any pointing to this (e.g
-	if a crazy root user tried to kill cifsd
-	kernel thread explicitly this might happen) */
-	/* BB: This shouldn't be necessary, see above */
-	spin_lock(&cifs_tcp_ses_lock);
-	list_for_each(tmp, &server->smb_ses_list) {
-		ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
-		ses->server = NULL;
-	}
-	spin_unlock(&cifs_tcp_ses_lock);
-
 	kfree(server->hostname);
 	task_to_wake = xchg(&server->tsk, NULL);
 	kfree(server);
@@ -1355,6 +1365,8 @@
 			vol->direct_io = 1;
 		} else if (strnicmp(data, "forcedirectio", 13) == 0) {
 			vol->direct_io = 1;
+		} else if (strnicmp(data, "strictcache", 11) == 0) {
+			vol->strict_io = 1;
 		} else if (strnicmp(data, "noac", 4) == 0) {
 			printk(KERN_WARNING "CIFS: Mount option noac not "
 				"supported. Instead set "
@@ -1579,6 +1591,9 @@
 
 	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+		if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
+			continue;
+
 		if (!match_address(server, addr,
 				   (struct sockaddr *)&vol->srcaddr))
 			continue;
@@ -1609,9 +1624,13 @@
 		return;
 	}
 
+	put_net(cifs_net_ns(server));
+
 	list_del_init(&server->tcp_ses_list);
 	spin_unlock(&cifs_tcp_ses_lock);
 
+	cancel_delayed_work_sync(&server->echo);
+
 	spin_lock(&GlobalMid_Lock);
 	server->tcpStatus = CifsExiting;
 	spin_unlock(&GlobalMid_Lock);
@@ -1681,6 +1700,7 @@
 		goto out_err;
 	}
 
+	cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
 	tcp_ses->hostname = extract_hostname(volume_info->UNC);
 	if (IS_ERR(tcp_ses->hostname)) {
 		rc = PTR_ERR(tcp_ses->hostname);
@@ -1701,8 +1721,10 @@
 		volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
 	tcp_ses->session_estab = false;
 	tcp_ses->sequence_number = 0;
+	tcp_ses->lstrp = jiffies;
 	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
 	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
+	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
 
 	/*
 	 * at this point we are the only ones with the pointer
@@ -1751,11 +1773,16 @@
 
 	cifs_fscache_get_client_cookie(tcp_ses);
 
+	/* queue echo request delayed work */
+	queue_delayed_work(system_nrt_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL);
+
 	return tcp_ses;
 
 out_err_crypto_release:
 	cifs_crypto_shash_release(tcp_ses);
 
+	put_net(cifs_net_ns(tcp_ses));
+
 out_err:
 	if (tcp_ses) {
 		if (!IS_ERR(tcp_ses->hostname))
@@ -2267,8 +2294,8 @@
 	}
 
 	if (socket == NULL) {
-		rc = sock_create_kern(sfamily, SOCK_STREAM,
-				      IPPROTO_TCP, &socket);
+		rc = __sock_create(cifs_net_ns(server), sfamily, SOCK_STREAM,
+				   IPPROTO_TCP, &socket, 1);
 		if (rc < 0) {
 			cERROR(1, "Error %d creating socket", rc);
 			server->ssocket = NULL;
@@ -2580,6 +2607,8 @@
 	if (pvolume_info->multiuser)
 		cifs_sb->mnt_cifs_flags |= (CIFS_MOUNT_MULTIUSER |
 					    CIFS_MOUNT_NO_PERM);
+	if (pvolume_info->strict_io)
+		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_STRICT_IO;
 	if (pvolume_info->direct_io) {
 		cFYI(1, "mounting share using direct i/o");
 		cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
@@ -2936,8 +2965,8 @@
 	TCONX_RSP *pSMBr;
 	unsigned char *bcc_ptr;
 	int rc = 0;
-	int length, bytes_left;
-	__u16 count;
+	int length;
+	__u16 bytes_left, count;
 
 	if (ses == NULL)
 		return -EIO;
@@ -2965,7 +2994,7 @@
 		bcc_ptr++;              /* skip password */
 		/* already aligned so no need to do it below */
 	} else {
-		pSMB->PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
+		pSMB->PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
 		/* BB FIXME add code to fail this if NTLMv2 or Kerberos
 		   specified as required (when that support is added to
 		   the vfs in the future) as only NTLM or the much
@@ -2981,9 +3010,10 @@
 					 bcc_ptr);
 		else
 #endif /* CIFS_WEAK_PW_HASH */
-		SMBNTencrypt(tcon->password, ses->server->cryptkey, bcc_ptr);
+		rc = SMBNTencrypt(tcon->password, ses->server->cryptkey,
+					bcc_ptr);
 
-		bcc_ptr += CIFS_SESS_KEY_SIZE;
+		bcc_ptr += CIFS_AUTH_RESP_SIZE;
 		if (ses->capabilities & CAP_UNICODE) {
 			/* must align unicode strings */
 			*bcc_ptr = 0; /* null byte password */
@@ -3021,7 +3051,7 @@
 	pSMB->ByteCount = cpu_to_le16(count);
 
 	rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length,
-			 CIFS_STD_OP);
+			 0);
 
 	/* above now done in SendReceive */
 	if ((rc == 0) && (tcon != NULL)) {
@@ -3031,7 +3061,7 @@
 		tcon->need_reconnect = false;
 		tcon->tid = smb_buffer_response->Tid;
 		bcc_ptr = pByteArea(smb_buffer_response);
-		bytes_left = BCC(smb_buffer_response);
+		bytes_left = get_bcc(smb_buffer_response);
 		length = strnlen(bcc_ptr, bytes_left - 2);
 		if (smb_buffer->Flags2 & SMBFLG2_UNICODE)
 			is_unicode = true;
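The connect.c changes above add a periodic SMB echo (cifs_echo_request(), requeued every SMB_ECHO_INTERVAL) and a check in the demultiplex loop that forces a reconnect once the server has been silent for echo_retries full intervals. A minimal user-space sketch of that decision, using seconds where the patch uses jiffies and assuming a 60-second interval:

#include <stdbool.h>
#include <time.h>

#define SMB_ECHO_INTERVAL 60	/* assumed 60 s; the patch expresses this in jiffies */

/* true once the server has been silent for echo_retries whole echo intervals */
static bool needs_reconnect(time_t last_response, unsigned int echo_retries)
{
	return time(NULL) > last_response +
			    (time_t)echo_retries * SMB_ECHO_INTERVAL;
}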
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index d843631..e964b1c 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -287,6 +287,7 @@
 	struct inode *inode = cifs_file->dentry->d_inode;
 	struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
 	struct cifsLockInfo *li, *tmp;
 
 	spin_lock(&cifs_file_list_lock);
@@ -302,6 +303,13 @@
 	if (list_empty(&cifsi->openFileList)) {
 		cFYI(1, "closing last open instance for inode %p",
 			cifs_file->dentry->d_inode);
+
+		/* in strict cache mode we need to invalidate the mapping on
+		   the last close because it may cause an error when we open
+		   this file again and get at least a level II oplock */
+		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
+			CIFS_I(inode)->invalid_mapping = true;
+
 		cifs_set_oplock_level(cifsi, 0);
 	}
 	spin_unlock(&cifs_file_list_lock);
@@ -338,7 +346,6 @@
 	struct cifsTconInfo *tcon;
 	struct tcon_link *tlink;
 	struct cifsFileInfo *pCifsFile = NULL;
-	struct cifsInodeInfo *pCifsInode;
 	char *full_path = NULL;
 	bool posix_open_ok = false;
 	__u16 netfid;
@@ -353,8 +360,6 @@
 	}
 	tcon = tlink_tcon(tlink);
 
-	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
-
 	full_path = build_path_from_dentry(file->f_path.dentry);
 	if (full_path == NULL) {
 		rc = -ENOMEM;
@@ -726,12 +731,12 @@
 
 		/* BB we could chain these into one lock request BB */
 		rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start,
-				 0, 1, lockType, 0 /* wait flag */ );
+				 0, 1, lockType, 0 /* wait flag */, 0);
 		if (rc == 0) {
 			rc = CIFSSMBLock(xid, tcon, netfid, length,
 					 pfLock->fl_start, 1 /* numUnlock */ ,
 					 0 /* numLock */ , lockType,
-					 0 /* wait flag */ );
+					 0 /* wait flag */, 0);
 			pfLock->fl_type = F_UNLCK;
 			if (rc != 0)
 				cERROR(1, "Error unlocking previously locked "
@@ -748,13 +753,13 @@
 				rc = CIFSSMBLock(xid, tcon, netfid, length,
 					pfLock->fl_start, 0, 1,
 					lockType | LOCKING_ANDX_SHARED_LOCK,
-					0 /* wait flag */);
+					0 /* wait flag */, 0);
 				if (rc == 0) {
 					rc = CIFSSMBLock(xid, tcon, netfid,
 						length, pfLock->fl_start, 1, 0,
 						lockType |
 						LOCKING_ANDX_SHARED_LOCK,
-						0 /* wait flag */);
+						0 /* wait flag */, 0);
 					pfLock->fl_type = F_RDLCK;
 					if (rc != 0)
 						cERROR(1, "Error unlocking "
@@ -797,8 +802,8 @@
 
 		if (numLock) {
 			rc = CIFSSMBLock(xid, tcon, netfid, length,
-					pfLock->fl_start,
-					0, numLock, lockType, wait_flag);
+					 pfLock->fl_start, 0, numLock, lockType,
+					 wait_flag, 0);
 
 			if (rc == 0) {
 				/* For Windows locks we must store them. */
@@ -818,9 +823,9 @@
 						(pfLock->fl_start + length) >=
 						(li->offset + li->length)) {
 					stored_rc = CIFSSMBLock(xid, tcon,
-							netfid,
-							li->length, li->offset,
-							1, 0, li->type, false);
+							netfid, li->length,
+							li->offset, 1, 0,
+							li->type, false, 0);
 					if (stored_rc)
 						rc = stored_rc;
 					else {
@@ -839,31 +844,8 @@
 	return rc;
 }
 
-/*
- * Set the timeout on write requests past EOF. For some servers (Windows)
- * these calls can be very long.
- *
- * If we're writing >10M past the EOF we give a 180s timeout. Anything less
- * than that gets a 45s timeout. Writes not past EOF get 15s timeouts.
- * The 10M cutoff is totally arbitrary. A better scheme for this would be
- * welcome if someone wants to suggest one.
- *
- * We may be able to do a better job with this if there were some way to
- * declare that a file should be sparse.
- */
-static int
-cifs_write_timeout(struct cifsInodeInfo *cifsi, loff_t offset)
-{
-	if (offset <= cifsi->server_eof)
-		return CIFS_STD_OP;
-	else if (offset > (cifsi->server_eof + (10 * 1024 * 1024)))
-		return CIFS_VLONG_OP;
-	else
-		return CIFS_LONG_OP;
-}
-
 /* update the file size (if needed) after a write */
-static void
+void
 cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
 		      unsigned int bytes_written)
 {
@@ -882,7 +864,7 @@
 	unsigned int total_written;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
-	int xid, long_op;
+	int xid;
 	struct cifsFileInfo *open_file;
 	struct cifsInodeInfo *cifsi = CIFS_I(inode);
 
@@ -903,7 +885,6 @@
 
 	xid = GetXid();
 
-	long_op = cifs_write_timeout(cifsi, *poffset);
 	for (total_written = 0; write_size > total_written;
 	     total_written += bytes_written) {
 		rc = -EAGAIN;
@@ -931,7 +912,7 @@
 				min_t(const int, cifs_sb->wsize,
 				      write_size - total_written),
 				*poffset, &bytes_written,
-				NULL, write_data + total_written, long_op);
+				NULL, write_data + total_written, 0);
 		}
 		if (rc || (bytes_written == 0)) {
 			if (total_written)
@@ -944,8 +925,6 @@
 			cifs_update_eof(cifsi, *poffset, bytes_written);
 			*poffset += bytes_written;
 		}
-		long_op = CIFS_STD_OP; /* subsequent writes fast -
-				    15 seconds is plenty */
 	}
 
 	cifs_stats_bytes_written(pTcon, total_written);
@@ -974,7 +953,7 @@
 	unsigned int total_written;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
-	int xid, long_op;
+	int xid;
 	struct dentry *dentry = open_file->dentry;
 	struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
 
@@ -987,7 +966,6 @@
 
 	xid = GetXid();
 
-	long_op = cifs_write_timeout(cifsi, *poffset);
 	for (total_written = 0; write_size > total_written;
 	     total_written += bytes_written) {
 		rc = -EAGAIN;
@@ -1017,7 +995,7 @@
 				rc = CIFSSMBWrite2(xid, pTcon,
 						open_file->netfid, len,
 						*poffset, &bytes_written,
-						iov, 1, long_op);
+						iov, 1, 0);
 			} else
 				rc = CIFSSMBWrite(xid, pTcon,
 					 open_file->netfid,
@@ -1025,7 +1003,7 @@
 					       write_size - total_written),
 					 *poffset, &bytes_written,
 					 write_data + total_written,
-					 NULL, long_op);
+					 NULL, 0);
 		}
 		if (rc || (bytes_written == 0)) {
 			if (total_written)
@@ -1038,8 +1016,6 @@
 			cifs_update_eof(cifsi, *poffset, bytes_written);
 			*poffset += bytes_written;
 		}
-		long_op = CIFS_STD_OP; /* subsequent writes fast -
-				    15 seconds is plenty */
 	}
 
 	cifs_stats_bytes_written(pTcon, total_written);
@@ -1167,7 +1143,6 @@
 	char *write_data;
 	int rc = -EFAULT;
 	int bytes_written = 0;
-	struct cifs_sb_info *cifs_sb;
 	struct inode *inode;
 	struct cifsFileInfo *open_file;
 
@@ -1175,7 +1150,6 @@
 		return -EFAULT;
 
 	inode = page->mapping->host;
-	cifs_sb = CIFS_SB(inode->i_sb);
 
 	offset += (loff_t)from;
 	write_data = kmap(page);
@@ -1239,7 +1213,7 @@
 	struct pagevec pvec;
 	int rc = 0;
 	int scanned = 0;
-	int xid, long_op;
+	int xid;
 
 	cifs_sb = CIFS_SB(mapping->host->i_sb);
 
@@ -1377,43 +1351,67 @@
 				break;
 		}
 		if (n_iov) {
+retry_write:
 			open_file = find_writable_file(CIFS_I(mapping->host),
 							false);
 			if (!open_file) {
 				cERROR(1, "No writable handles for inode");
 				rc = -EBADF;
 			} else {
-				long_op = cifs_write_timeout(cifsi, offset);
 				rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
 						   bytes_to_write, offset,
 						   &bytes_written, iov, n_iov,
-						   long_op);
+						   0);
 				cifsFileInfo_put(open_file);
-				cifs_update_eof(cifsi, offset, bytes_written);
 			}
 
-			if (rc || bytes_written < bytes_to_write) {
-				cERROR(1, "Write2 ret %d, wrote %d",
-					  rc, bytes_written);
-				mapping_set_error(mapping, rc);
-			} else {
+			cFYI(1, "Write2 rc=%d, wrote=%u", rc, bytes_written);
+
+			/*
+			 * For now, treat a short write as if nothing got
+			 * written. A zero-length write, however, indicates
+			 * ENOSPC or EFBIG. We have no way to know which, so
+			 * call it ENOSPC for now; EFBIG would
+			 * get translated to AS_EIO anyway.
+			 *
+			 * FIXME: make it take into account the data that did
+			 *	  get written
+			 */
+			if (rc == 0) {
+				if (bytes_written == 0)
+					rc = -ENOSPC;
+				else if (bytes_written < bytes_to_write)
+					rc = -EAGAIN;
+			}
+
+			/* retry on data-integrity flush */
+			if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
+				goto retry_write;
+
+			/* fix the stats and EOF */
+			if (bytes_written > 0) {
 				cifs_stats_bytes_written(tcon, bytes_written);
+				cifs_update_eof(cifsi, offset, bytes_written);
 			}
 
 			for (i = 0; i < n_iov; i++) {
 				page = pvec.pages[first + i];
-				/* Should we also set page error on
-				success rc but too little data written? */
-				/* BB investigate retry logic on temporary
-				server crash cases and how recovery works
-				when page marked as error */
-				if (rc)
+				/* on retryable write error, redirty page */
+				if (rc == -EAGAIN)
+					redirty_page_for_writepage(wbc, page);
+				else if (rc != 0)
 					SetPageError(page);
 				kunmap(page);
 				unlock_page(page);
 				end_page_writeback(page);
 				page_cache_release(page);
 			}
+
+			if (rc != -EAGAIN)
+				mapping_set_error(mapping, rc);
+			else
+				rc = 0;
+
 			if ((wbc->nr_to_write -= n_iov) <= 0)
 				done = 1;
 			index = next;
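The rewritten error handling above maps the CIFSSMBWrite2() outcome onto writeback semantics: a successful call that wrote nothing is treated as -ENOSPC, a short write as -EAGAIN (retried for data-integrity flushes, otherwise the pages are redirtied). A compact sketch of that mapping, with illustrative names:

#include <errno.h>

/* Sketch of the error mapping used above (names are illustrative). */
static int map_write_result(int rc, unsigned int written, unsigned int expected)
{
	if (rc != 0)
		return rc;		/* transport or server error */
	if (written == 0)
		return -ENOSPC;		/* nothing written: treat as no space */
	if (written < expected)
		return -EAGAIN;		/* short write: retry or redirty */
	return 0;
}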
@@ -1525,27 +1523,47 @@
 	return rc;
 }
 
-int cifs_fsync(struct file *file, int datasync)
+int cifs_strict_fsync(struct file *file, int datasync)
 {
 	int xid;
 	int rc = 0;
 	struct cifsTconInfo *tcon;
 	struct cifsFileInfo *smbfile = file->private_data;
 	struct inode *inode = file->f_path.dentry->d_inode;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
 
 	xid = GetXid();
 
 	cFYI(1, "Sync file - name: %s datasync: 0x%x",
 		file->f_path.dentry->d_name.name, datasync);
 
-	rc = filemap_write_and_wait(inode->i_mapping);
-	if (rc == 0) {
-		struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+	if (!CIFS_I(inode)->clientCanCacheRead)
+		cifs_invalidate_mapping(inode);
 
-		tcon = tlink_tcon(smbfile->tlink);
-		if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
-			rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
-	}
+	tcon = tlink_tcon(smbfile->tlink);
+	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
+		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
+
+	FreeXid(xid);
+	return rc;
+}
+
+int cifs_fsync(struct file *file, int datasync)
+{
+	int xid;
+	int rc = 0;
+	struct cifsTconInfo *tcon;
+	struct cifsFileInfo *smbfile = file->private_data;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+
+	xid = GetXid();
+
+	cFYI(1, "Sync file - name: %s datasync: 0x%x",
+		file->f_path.dentry->d_name.name, datasync);
+
+	tcon = tlink_tcon(smbfile->tlink);
+	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC))
+		rc = CIFSSMBFlush(xid, tcon, smbfile->netfid);
 
 	FreeXid(xid);
 	return rc;
@@ -1596,42 +1614,244 @@
 	return rc;
 }
 
-ssize_t cifs_user_read(struct file *file, char __user *read_data,
-	size_t read_size, loff_t *poffset)
+static int
+cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
 {
-	int rc = -EACCES;
+	int rc = 0;
+	unsigned long i;
+
+	for (i = 0; i < num_pages; i++) {
+		pages[i] = alloc_page(__GFP_HIGHMEM);
+		if (!pages[i]) {
+			/*
+			 * save the number of pages we have already allocated
+			 * and return with an ENOMEM error
+			 */
+			num_pages = i;
+			rc = -ENOMEM;
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	for (i = 0; i < num_pages; i++)
+		put_page(pages[i]);
+	return rc;
+}
+
+static inline
+size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
+{
+	size_t num_pages;
+	size_t clen;
+
+	clen = min_t(const size_t, len, wsize);
+	num_pages = clen / PAGE_CACHE_SIZE;
+	if (clen % PAGE_CACHE_SIZE)
+		num_pages++;
+
+	if (cur_len)
+		*cur_len = clen;
+
+	return num_pages;
+}
+
+static ssize_t
+cifs_iovec_write(struct file *file, const struct iovec *iov,
+		 unsigned long nr_segs, loff_t *poffset)
+{
+	unsigned int written;
+	unsigned long num_pages, npages, i;
+	size_t copied, len, cur_len;
+	ssize_t total_written = 0;
+	struct kvec *to_send;
+	struct page **pages;
+	struct iov_iter it;
+	struct inode *inode;
+	struct cifsFileInfo *open_file;
+	struct cifsTconInfo *pTcon;
+	struct cifs_sb_info *cifs_sb;
+	int xid, rc;
+
+	len = iov_length(iov, nr_segs);
+	if (!len)
+		return 0;
+
+	rc = generic_write_checks(file, poffset, &len, 0);
+	if (rc)
+		return rc;
+
+	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
+	num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
+
+	pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
+	if (!to_send) {
+		kfree(pages);
+		return -ENOMEM;
+	}
+
+	rc = cifs_write_allocate_pages(pages, num_pages);
+	if (rc) {
+		kfree(pages);
+		kfree(to_send);
+		return rc;
+	}
+
+	xid = GetXid();
+	open_file = file->private_data;
+	pTcon = tlink_tcon(open_file->tlink);
+	inode = file->f_path.dentry->d_inode;
+
+	iov_iter_init(&it, iov, nr_segs, len, 0);
+	npages = num_pages;
+
+	do {
+		size_t save_len = cur_len;
+		for (i = 0; i < npages; i++) {
+			copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
+			copied = iov_iter_copy_from_user(pages[i], &it, 0,
+							 copied);
+			cur_len -= copied;
+			iov_iter_advance(&it, copied);
+			to_send[i+1].iov_base = kmap(pages[i]);
+			to_send[i+1].iov_len = copied;
+		}
+
+		cur_len = save_len - cur_len;
+
+		do {
+			if (open_file->invalidHandle) {
+				rc = cifs_reopen_file(open_file, false);
+				if (rc != 0)
+					break;
+			}
+			rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid,
+					   cur_len, *poffset, &written,
+					   to_send, npages, 0);
+		} while (rc == -EAGAIN);
+
+		for (i = 0; i < npages; i++)
+			kunmap(pages[i]);
+
+		if (written) {
+			len -= written;
+			total_written += written;
+			cifs_update_eof(CIFS_I(inode), *poffset, written);
+			*poffset += written;
+		} else if (rc < 0) {
+			if (!total_written)
+				total_written = rc;
+			break;
+		}
+
+		/* get length and number of kvecs of the next write */
+		npages = get_numpages(cifs_sb->wsize, len, &cur_len);
+	} while (len > 0);
+
+	if (total_written > 0) {
+		spin_lock(&inode->i_lock);
+		if (*poffset > inode->i_size)
+			i_size_write(inode, *poffset);
+		spin_unlock(&inode->i_lock);
+	}
+
+	cifs_stats_bytes_written(pTcon, total_written);
+	mark_inode_dirty_sync(inode);
+
+	for (i = 0; i < num_pages; i++)
+		put_page(pages[i]);
+	kfree(to_send);
+	kfree(pages);
+	FreeXid(xid);
+	return total_written;
+}
+
+static ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
+				unsigned long nr_segs, loff_t pos)
+{
+	ssize_t written;
+	struct inode *inode;
+
+	inode = iocb->ki_filp->f_path.dentry->d_inode;
+
+	/*
+	 * BB - optimize this path when signing is disabled: we could drop the
+	 * extra memory-to-memory copy and build the write request directly
+	 * from the iovec buffers.
+	 */
+
+	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
+	if (written > 0) {
+		CIFS_I(inode)->invalid_mapping = true;
+		iocb->ki_pos = pos;
+	}
+
+	return written;
+}
+
+ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
+			   unsigned long nr_segs, loff_t pos)
+{
+	struct inode *inode;
+
+	inode = iocb->ki_filp->f_path.dentry->d_inode;
+
+	if (CIFS_I(inode)->clientCanCacheAll)
+		return generic_file_aio_write(iocb, iov, nr_segs, pos);
+
+	/*
+	 * In strict cache mode we need to write the data to the server exactly
+	 * from pos to pos+len-1 rather than flush all affected pages, because
+	 * flushing may cause an error with mandatory locks on these pages but
+	 * not on the region from pos to pos+len-1.
+	 */
+
+	return cifs_user_writev(iocb, iov, nr_segs, pos);
+}
+
+static ssize_t
+cifs_iovec_read(struct file *file, const struct iovec *iov,
+		 unsigned long nr_segs, loff_t *poffset)
+{
+	int rc;
+	int xid;
+	ssize_t total_read;
 	unsigned int bytes_read = 0;
-	unsigned int total_read = 0;
-	unsigned int current_read_size;
+	size_t len, cur_len;
+	int iov_offset = 0;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
-	int xid;
 	struct cifsFileInfo *open_file;
-	char *smb_read_data;
-	char __user *current_offset;
 	struct smb_com_read_rsp *pSMBr;
+	char *read_data;
+
+	if (!nr_segs)
+		return 0;
+
+	len = iov_length(iov, nr_segs);
+	if (!len)
+		return 0;
 
 	xid = GetXid();
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 
-	if (file->private_data == NULL) {
-		rc = -EBADF;
-		FreeXid(xid);
-		return rc;
-	}
 	open_file = file->private_data;
 	pTcon = tlink_tcon(open_file->tlink);
 
 	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
 		cFYI(1, "attempting read on write only file instance");
 
-	for (total_read = 0, current_offset = read_data;
-	     read_size > total_read;
-	     total_read += bytes_read, current_offset += bytes_read) {
-		current_read_size = min_t(const int, read_size - total_read,
-					  cifs_sb->rsize);
+	for (total_read = 0; total_read < len; total_read += bytes_read) {
+		cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
 		rc = -EAGAIN;
-		smb_read_data = NULL;
+		read_data = NULL;
+
 		while (rc == -EAGAIN) {
 			int buf_type = CIFS_NO_BUFFER;
 			if (open_file->invalidHandle) {
@@ -1639,27 +1859,25 @@
 				if (rc != 0)
 					break;
 			}
-			rc = CIFSSMBRead(xid, pTcon,
-					 open_file->netfid,
-					 current_read_size, *poffset,
-					 &bytes_read, &smb_read_data,
-					 &buf_type);
-			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
-			if (smb_read_data) {
-				if (copy_to_user(current_offset,
-						smb_read_data +
-						4 /* RFC1001 length field */ +
-						le16_to_cpu(pSMBr->DataOffset),
-						bytes_read))
+			rc = CIFSSMBRead(xid, pTcon, open_file->netfid,
+					 cur_len, *poffset, &bytes_read,
+					 &read_data, &buf_type);
+			pSMBr = (struct smb_com_read_rsp *)read_data;
+			if (read_data) {
+				char *data_offset = read_data + 4 +
+						le16_to_cpu(pSMBr->DataOffset);
+				if (memcpy_toiovecend(iov, data_offset,
+						      iov_offset, bytes_read))
 					rc = -EFAULT;
-
 				if (buf_type == CIFS_SMALL_BUFFER)
-					cifs_small_buf_release(smb_read_data);
+					cifs_small_buf_release(read_data);
 				else if (buf_type == CIFS_LARGE_BUFFER)
-					cifs_buf_release(smb_read_data);
-				smb_read_data = NULL;
+					cifs_buf_release(read_data);
+				read_data = NULL;
+				iov_offset += bytes_read;
 			}
 		}
+
 		if (rc || (bytes_read == 0)) {
 			if (total_read) {
 				break;
@@ -1672,13 +1890,57 @@
 			*poffset += bytes_read;
 		}
 	}
+
 	FreeXid(xid);
 	return total_read;
 }
 
+ssize_t cifs_user_read(struct file *file, char __user *read_data,
+		       size_t read_size, loff_t *poffset)
+{
+	struct iovec iov;
+	iov.iov_base = read_data;
+	iov.iov_len = read_size;
+
+	return cifs_iovec_read(file, &iov, 1, poffset);
+}
+
+static ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
+			       unsigned long nr_segs, loff_t pos)
+{
+	ssize_t read;
+
+	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
+	if (read > 0)
+		iocb->ki_pos = pos;
+
+	return read;
+}
+
+ssize_t cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
+			  unsigned long nr_segs, loff_t pos)
+{
+	struct inode *inode;
+
+	inode = iocb->ki_filp->f_path.dentry->d_inode;
+
+	if (CIFS_I(inode)->clientCanCacheRead)
+		return generic_file_aio_read(iocb, iov, nr_segs, pos);
+
+	/*
+	 * In strict cache mode we need to read from the server every time if
+	 * we don't have a level II oplock, because the server can delay the
+	 * mtime change and so we can't decide when to invalidate the inode.
+	 * Page reads can also fail if there are mandatory locks on pages
+	 * affected by this read but not on the region from pos to
+	 * pos+len-1.
+	 */
+
+	return cifs_user_readv(iocb, iov, nr_segs, pos);
+}
 
 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
-	loff_t *poffset)
+			 loff_t *poffset)
 {
 	int rc = -EACCES;
 	unsigned int bytes_read = 0;
@@ -1746,6 +2008,21 @@
 	return total_read;
 }
 
+int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int rc, xid;
+	struct inode *inode = file->f_path.dentry->d_inode;
+
+	xid = GetXid();
+
+	if (!CIFS_I(inode)->clientCanCacheRead)
+		cifs_invalidate_mapping(inode);
+
+	rc = generic_file_mmap(file, vma);
+	FreeXid(xid);
+	return rc;
+}
+
 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	int rc, xid;
@@ -2192,7 +2469,8 @@
 	 */
 	if (!cfile->oplock_break_cancelled) {
 		rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0,
-				 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false);
+				 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false,
+				 cinode->clientCanCacheRead ? 1 : 0);
 		cFYI(1, "Oplock release rc = %d", rc);
 	}
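cifs_iovec_write() above clamps each pass to the server's wsize and asks get_numpages() how many pages that slice occupies. The rounding it performs amounts to the following standalone helper, assuming a 4 KiB page in place of PAGE_CACHE_SIZE:

#include <stddef.h>

#define PAGE_SZ 4096	/* assumed page size; the patch uses PAGE_CACHE_SIZE */

static size_t numpages_for(size_t wsize, size_t len, size_t *cur_len)
{
	size_t clen = len < wsize ? len : wsize;	/* clamp to the server write size */
	size_t num = (clen + PAGE_SZ - 1) / PAGE_SZ;	/* round up to whole pages */

	if (cur_len)
		*cur_len = clen;
	return num;
}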
 
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 6c9ee80..8852470 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -44,13 +44,17 @@
 				inode->i_fop = &cifs_file_direct_nobrl_ops;
 			else
 				inode->i_fop = &cifs_file_direct_ops;
+		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
+			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+				inode->i_fop = &cifs_file_strict_nobrl_ops;
+			else
+				inode->i_fop = &cifs_file_strict_ops;
 		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
 			inode->i_fop = &cifs_file_nobrl_ops;
 		else { /* not direct, send byte range locks */
 			inode->i_fop = &cifs_file_ops;
 		}
 
-
 		/* check if server can support readpages */
 		if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
 				PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
@@ -1679,7 +1683,7 @@
 /*
  * Zap the cache. Called when invalid_mapping flag is set.
  */
-static void
+void
 cifs_invalidate_mapping(struct inode *inode)
 {
 	int rc;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 306769d..e8804d3 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -28,7 +28,6 @@
 #include "cifsproto.h"
 #include "cifs_debug.h"
 #include "cifs_fs_sb.h"
-#include "md5.h"
 
 #define CIFS_MF_SYMLINK_LEN_OFFSET (4+1)
 #define CIFS_MF_SYMLINK_MD5_OFFSET (CIFS_MF_SYMLINK_LEN_OFFSET+(4+1))
@@ -47,6 +46,45 @@
 	md5_hash[12], md5_hash[13], md5_hash[14], md5_hash[15]
 
 static int
+symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
+{
+	int rc;
+	unsigned int size;
+	struct crypto_shash *md5;
+	struct sdesc *sdescmd5;
+
+	md5 = crypto_alloc_shash("md5", 0, 0);
+	if (IS_ERR(md5)) {
+		rc = PTR_ERR(md5);
+		cERROR(1, "%s: Crypto md5 allocation error %d\n", __func__, rc);
+		return rc;
+	}
+	size = sizeof(struct shash_desc) + crypto_shash_descsize(md5);
+	sdescmd5 = kmalloc(size, GFP_KERNEL);
+	if (!sdescmd5) {
+		rc = -ENOMEM;
+		cERROR(1, "%s: Memory allocation failure\n", __func__);
+		goto symlink_hash_err;
+	}
+	sdescmd5->shash.tfm = md5;
+	sdescmd5->shash.flags = 0x0;
+
+	rc = crypto_shash_init(&sdescmd5->shash);
+	if (rc) {
+		cERROR(1, "%s: Could not init md5 shash\n", __func__);
+		goto symlink_hash_err;
+	}
+	crypto_shash_update(&sdescmd5->shash, link_str, link_len);
+	rc = crypto_shash_final(&sdescmd5->shash, md5_hash);
+
+symlink_hash_err:
+	crypto_free_shash(md5);
+	kfree(sdescmd5);
+
+	return rc;
+}
+
+static int
 CIFSParseMFSymlink(const u8 *buf,
 		   unsigned int buf_len,
 		   unsigned int *_link_len,
@@ -56,7 +94,6 @@
 	unsigned int link_len;
 	const char *md5_str1;
 	const char *link_str;
-	struct MD5Context md5_ctx;
 	u8 md5_hash[16];
 	char md5_str2[34];
 
@@ -70,9 +107,11 @@
 	if (rc != 1)
 		return -EINVAL;
 
-	cifs_MD5_init(&md5_ctx);
-	cifs_MD5_update(&md5_ctx, (const u8 *)link_str, link_len);
-	cifs_MD5_final(md5_hash, &md5_ctx);
+	rc = symlink_hash(link_len, link_str, md5_hash);
+	if (rc) {
+		cFYI(1, "%s: MD5 hash failure: %d\n", __func__, rc);
+		return rc;
+	}
 
 	snprintf(md5_str2, sizeof(md5_str2),
 		 CIFS_MF_SYMLINK_MD5_FORMAT,
@@ -94,9 +133,9 @@
 static int
 CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str)
 {
+	int rc;
 	unsigned int link_len;
 	unsigned int ofs;
-	struct MD5Context md5_ctx;
 	u8 md5_hash[16];
 
 	if (buf_len != CIFS_MF_SYMLINK_FILE_SIZE)
@@ -107,9 +146,11 @@
 	if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
 		return -ENAMETOOLONG;
 
-	cifs_MD5_init(&md5_ctx);
-	cifs_MD5_update(&md5_ctx, (const u8 *)link_str, link_len);
-	cifs_MD5_final(md5_hash, &md5_ctx);
+	rc = symlink_hash(link_len, link_str, md5_hash);
+	if (rc) {
+		cFYI(1, "%s: MD5 hash failure: %d\n", __func__, rc);
+		return rc;
+	}
 
 	snprintf(buf, buf_len,
 		 CIFS_MF_SYMLINK_LEN_FORMAT CIFS_MF_SYMLINK_MD5_FORMAT,
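symlink_hash() above sizes and fills a struct sdesc that is defined elsewhere in the CIFS sources, not in this hunk. A minimal definition consistent with how it is used here (a shash_desc followed by the transform's per-descriptor state) would be roughly the following; this is an assumption about the surrounding headers, not text from this patch:

#include <crypto/hash.h>

/* Assumed layout of the sdesc wrapper used by symlink_hash() above. */
struct sdesc {
	struct shash_desc shash;
	char ctx[];	/* crypto_shash_descsize(tfm) bytes of per-desc state */
};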
diff --git a/fs/cifs/md4.c b/fs/cifs/md4.c
deleted file mode 100644
index a725c26..0000000
--- a/fs/cifs/md4.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
-   Unix SMB/Netbios implementation.
-   Version 1.9.
-   a implementation of MD4 designed for use in the SMB authentication protocol
-   Copyright (C) Andrew Tridgell 1997-1998.
-   Modified by Steve French (sfrench@us.ibm.com) 2002-2003
-
-   This program is free software; you can redistribute it and/or modify
-   it under the terms of the GNU General Public License as published by
-   the Free Software Foundation; either version 2 of the License, or
-   (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-   GNU General Public License for more details.
-
-   You should have received a copy of the GNU General Public License
-   along with this program; if not, write to the Free Software
-   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-#include <linux/module.h>
-#include <linux/fs.h>
-#include "cifsencrypt.h"
-
-/* NOTE: This code makes no attempt to be fast! */
-
-static __u32
-F(__u32 X, __u32 Y, __u32 Z)
-{
-	return (X & Y) | ((~X) & Z);
-}
-
-static __u32
-G(__u32 X, __u32 Y, __u32 Z)
-{
-	return (X & Y) | (X & Z) | (Y & Z);
-}
-
-static __u32
-H(__u32 X, __u32 Y, __u32 Z)
-{
-	return X ^ Y ^ Z;
-}
-
-static __u32
-lshift(__u32 x, int s)
-{
-	x &= 0xFFFFFFFF;
-	return ((x << s) & 0xFFFFFFFF) | (x >> (32 - s));
-}
-
-#define ROUND1(a,b,c,d,k,s) (*a) = lshift((*a) + F(*b,*c,*d) + X[k], s)
-#define ROUND2(a,b,c,d,k,s) (*a) = lshift((*a) + G(*b,*c,*d) + X[k] + (__u32)0x5A827999,s)
-#define ROUND3(a,b,c,d,k,s) (*a) = lshift((*a) + H(*b,*c,*d) + X[k] + (__u32)0x6ED9EBA1,s)
-
-/* this applies md4 to 64 byte chunks */
-static void
-mdfour64(__u32 *M, __u32 *A, __u32 *B, __u32 *C, __u32 *D)
-{
-	int j;
-	__u32 AA, BB, CC, DD;
-	__u32 X[16];
-
-
-	for (j = 0; j < 16; j++)
-		X[j] = M[j];
-
-	AA = *A;
-	BB = *B;
-	CC = *C;
-	DD = *D;
-
-	ROUND1(A, B, C, D, 0, 3);
-	ROUND1(D, A, B, C, 1, 7);
-	ROUND1(C, D, A, B, 2, 11);
-	ROUND1(B, C, D, A, 3, 19);
-	ROUND1(A, B, C, D, 4, 3);
-	ROUND1(D, A, B, C, 5, 7);
-	ROUND1(C, D, A, B, 6, 11);
-	ROUND1(B, C, D, A, 7, 19);
-	ROUND1(A, B, C, D, 8, 3);
-	ROUND1(D, A, B, C, 9, 7);
-	ROUND1(C, D, A, B, 10, 11);
-	ROUND1(B, C, D, A, 11, 19);
-	ROUND1(A, B, C, D, 12, 3);
-	ROUND1(D, A, B, C, 13, 7);
-	ROUND1(C, D, A, B, 14, 11);
-	ROUND1(B, C, D, A, 15, 19);
-
-	ROUND2(A, B, C, D, 0, 3);
-	ROUND2(D, A, B, C, 4, 5);
-	ROUND2(C, D, A, B, 8, 9);
-	ROUND2(B, C, D, A, 12, 13);
-	ROUND2(A, B, C, D, 1, 3);
-	ROUND2(D, A, B, C, 5, 5);
-	ROUND2(C, D, A, B, 9, 9);
-	ROUND2(B, C, D, A, 13, 13);
-	ROUND2(A, B, C, D, 2, 3);
-	ROUND2(D, A, B, C, 6, 5);
-	ROUND2(C, D, A, B, 10, 9);
-	ROUND2(B, C, D, A, 14, 13);
-	ROUND2(A, B, C, D, 3, 3);
-	ROUND2(D, A, B, C, 7, 5);
-	ROUND2(C, D, A, B, 11, 9);
-	ROUND2(B, C, D, A, 15, 13);
-
-	ROUND3(A, B, C, D, 0, 3);
-	ROUND3(D, A, B, C, 8, 9);
-	ROUND3(C, D, A, B, 4, 11);
-	ROUND3(B, C, D, A, 12, 15);
-	ROUND3(A, B, C, D, 2, 3);
-	ROUND3(D, A, B, C, 10, 9);
-	ROUND3(C, D, A, B, 6, 11);
-	ROUND3(B, C, D, A, 14, 15);
-	ROUND3(A, B, C, D, 1, 3);
-	ROUND3(D, A, B, C, 9, 9);
-	ROUND3(C, D, A, B, 5, 11);
-	ROUND3(B, C, D, A, 13, 15);
-	ROUND3(A, B, C, D, 3, 3);
-	ROUND3(D, A, B, C, 11, 9);
-	ROUND3(C, D, A, B, 7, 11);
-	ROUND3(B, C, D, A, 15, 15);
-
-	*A += AA;
-	*B += BB;
-	*C += CC;
-	*D += DD;
-
-	*A &= 0xFFFFFFFF;
-	*B &= 0xFFFFFFFF;
-	*C &= 0xFFFFFFFF;
-	*D &= 0xFFFFFFFF;
-
-	for (j = 0; j < 16; j++)
-		X[j] = 0;
-}
-
-static void
-copy64(__u32 *M, unsigned char *in)
-{
-	int i;
-
-	for (i = 0; i < 16; i++)
-		M[i] = (in[i * 4 + 3] << 24) | (in[i * 4 + 2] << 16) |
-		    (in[i * 4 + 1] << 8) | (in[i * 4 + 0] << 0);
-}
-
-static void
-copy4(unsigned char *out, __u32 x)
-{
-	out[0] = x & 0xFF;
-	out[1] = (x >> 8) & 0xFF;
-	out[2] = (x >> 16) & 0xFF;
-	out[3] = (x >> 24) & 0xFF;
-}
-
-/* produce a md4 message digest from data of length n bytes */
-void
-mdfour(unsigned char *out, unsigned char *in, int n)
-{
-	unsigned char buf[128];
-	__u32 M[16];
-	__u32 b = n * 8;
-	int i;
-	__u32 A = 0x67452301;
-	__u32 B = 0xefcdab89;
-	__u32 C = 0x98badcfe;
-	__u32 D = 0x10325476;
-
-	while (n > 64) {
-		copy64(M, in);
-		mdfour64(M, &A, &B, &C, &D);
-		in += 64;
-		n -= 64;
-	}
-
-	for (i = 0; i < 128; i++)
-		buf[i] = 0;
-	memcpy(buf, in, n);
-	buf[n] = 0x80;
-
-	if (n <= 55) {
-		copy4(buf + 56, b);
-		copy64(M, buf);
-		mdfour64(M, &A, &B, &C, &D);
-	} else {
-		copy4(buf + 120, b);
-		copy64(M, buf);
-		mdfour64(M, &A, &B, &C, &D);
-		copy64(M, buf + 64);
-		mdfour64(M, &A, &B, &C, &D);
-	}
-
-	for (i = 0; i < 128; i++)
-		buf[i] = 0;
-	copy64(M, buf);
-
-	copy4(out, A);
-	copy4(out + 4, B);
-	copy4(out + 8, C);
-	copy4(out + 12, D);
-
-	A = B = C = D = 0;
-}
diff --git a/fs/cifs/md5.c b/fs/cifs/md5.c
deleted file mode 100644
index 98b66a5..0000000
--- a/fs/cifs/md5.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * This code implements the MD5 message-digest algorithm.
- * The algorithm is due to Ron Rivest.  This code was
- * written by Colin Plumb in 1993, no copyright is claimed.
- * This code is in the public domain; do with it what you wish.
- *
- * Equivalent code is available from RSA Data Security, Inc.
- * This code has been tested against that, and is equivalent,
- * except that you don't need to include two pages of legalese
- * with every copy.
- *
- * To compute the message digest of a chunk of bytes, declare an
- * MD5Context structure, pass it to cifs_MD5_init, call cifs_MD5_update as
- * needed on buffers full of bytes, and then call cifs_MD5_final, which
- * will fill a supplied 16-byte array with the digest.
- */
-
-/* This code slightly modified to fit into Samba by
-   abartlet@samba.org Jun 2001
-   and to fit the cifs vfs by
-   Steve French sfrench@us.ibm.com */
-
-#include <linux/string.h>
-#include "md5.h"
-
-static void MD5Transform(__u32 buf[4], __u32 const in[16]);
-
-/*
- * Note: this code is harmless on little-endian machines.
- */
-static void
-byteReverse(unsigned char *buf, unsigned longs)
-{
-	__u32 t;
-	do {
-		t = (__u32) ((unsigned) buf[3] << 8 | buf[2]) << 16 |
-		    ((unsigned) buf[1] << 8 | buf[0]);
-		*(__u32 *) buf = t;
-		buf += 4;
-	} while (--longs);
-}
-
-/*
- * Start MD5 accumulation.  Set bit count to 0 and buffer to mysterious
- * initialization constants.
- */
-void
-cifs_MD5_init(struct MD5Context *ctx)
-{
-	ctx->buf[0] = 0x67452301;
-	ctx->buf[1] = 0xefcdab89;
-	ctx->buf[2] = 0x98badcfe;
-	ctx->buf[3] = 0x10325476;
-
-	ctx->bits[0] = 0;
-	ctx->bits[1] = 0;
-}
-
-/*
- * Update context to reflect the concatenation of another buffer full
- * of bytes.
- */
-void
-cifs_MD5_update(struct MD5Context *ctx, unsigned char const *buf, unsigned len)
-{
-	register __u32 t;
-
-	/* Update bitcount */
-
-	t = ctx->bits[0];
-	if ((ctx->bits[0] = t + ((__u32) len << 3)) < t)
-		ctx->bits[1]++;	/* Carry from low to high */
-	ctx->bits[1] += len >> 29;
-
-	t = (t >> 3) & 0x3f;	/* Bytes already in shsInfo->data */
-
-	/* Handle any leading odd-sized chunks */
-
-	if (t) {
-		unsigned char *p = (unsigned char *) ctx->in + t;
-
-		t = 64 - t;
-		if (len < t) {
-			memmove(p, buf, len);
-			return;
-		}
-		memmove(p, buf, t);
-		byteReverse(ctx->in, 16);
-		MD5Transform(ctx->buf, (__u32 *) ctx->in);
-		buf += t;
-		len -= t;
-	}
-	/* Process data in 64-byte chunks */
-
-	while (len >= 64) {
-		memmove(ctx->in, buf, 64);
-		byteReverse(ctx->in, 16);
-		MD5Transform(ctx->buf, (__u32 *) ctx->in);
-		buf += 64;
-		len -= 64;
-	}
-
-	/* Handle any remaining bytes of data. */
-
-	memmove(ctx->in, buf, len);
-}
-
-/*
- * Final wrapup - pad to 64-byte boundary with the bit pattern
- * 1 0* (64-bit count of bits processed, MSB-first)
- */
-void
-cifs_MD5_final(unsigned char digest[16], struct MD5Context *ctx)
-{
-	unsigned int count;
-	unsigned char *p;
-
-	/* Compute number of bytes mod 64 */
-	count = (ctx->bits[0] >> 3) & 0x3F;
-
-	/* Set the first char of padding to 0x80.  This is safe since there is
-	   always at least one byte free */
-	p = ctx->in + count;
-	*p++ = 0x80;
-
-	/* Bytes of padding needed to make 64 bytes */
-	count = 64 - 1 - count;
-
-	/* Pad out to 56 mod 64 */
-	if (count < 8) {
-		/* Two lots of padding:  Pad the first block to 64 bytes */
-		memset(p, 0, count);
-		byteReverse(ctx->in, 16);
-		MD5Transform(ctx->buf, (__u32 *) ctx->in);
-
-		/* Now fill the next block with 56 bytes */
-		memset(ctx->in, 0, 56);
-	} else {
-		/* Pad block to 56 bytes */
-		memset(p, 0, count - 8);
-	}
-	byteReverse(ctx->in, 14);
-
-	/* Append length in bits and transform */
-	((__u32 *) ctx->in)[14] = ctx->bits[0];
-	((__u32 *) ctx->in)[15] = ctx->bits[1];
-
-	MD5Transform(ctx->buf, (__u32 *) ctx->in);
-	byteReverse((unsigned char *) ctx->buf, 4);
-	memmove(digest, ctx->buf, 16);
-	memset(ctx, 0, sizeof(*ctx));	/* In case it's sensitive */
-}
-
-/* The four core functions - F1 is optimized somewhat */
-
-/* #define F1(x, y, z) (x & y | ~x & z) */
-#define F1(x, y, z) (z ^ (x & (y ^ z)))
-#define F2(x, y, z) F1(z, x, y)
-#define F3(x, y, z) (x ^ y ^ z)
-#define F4(x, y, z) (y ^ (x | ~z))
-
-/* This is the central step in the MD5 algorithm. */
-#define MD5STEP(f, w, x, y, z, data, s) \
-	(w += f(x, y, z) + data,  w = w<<s | w>>(32-s),  w += x)
-
-/*
- * The core of the MD5 algorithm, this alters an existing MD5 hash to
- * reflect the addition of 16 longwords of new data.  cifs_MD5_update blocks
- * the data and converts bytes into longwords for this routine.
- */
-static void
-MD5Transform(__u32 buf[4], __u32 const in[16])
-{
-	register __u32 a, b, c, d;
-
-	a = buf[0];
-	b = buf[1];
-	c = buf[2];
-	d = buf[3];
-
-	MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
-	MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
-	MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
-	MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
-	MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
-	MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
-	MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
-	MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
-	MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
-	MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
-	MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
-	MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
-	MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
-	MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
-	MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
-	MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
-
-	MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
-	MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
-	MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
-	MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
-	MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
-	MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
-	MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
-	MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
-	MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
-	MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
-	MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
-	MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
-	MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
-	MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
-	MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
-	MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
-
-	MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
-	MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
-	MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
-	MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
-	MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
-	MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
-	MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
-	MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
-	MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
-	MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
-	MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
-	MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
-	MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
-	MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
-	MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
-	MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
-
-	MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
-	MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
-	MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
-	MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
-	MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
-	MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
-	MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
-	MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
-	MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
-	MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
-	MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
-	MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
-	MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
-	MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
-	MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
-	MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
-
-	buf[0] += a;
-	buf[1] += b;
-	buf[2] += c;
-	buf[3] += d;
-}
-
-#if 0   /* currently unused */
-/***********************************************************************
- the rfc 2104 version of hmac_md5 initialisation.
-***********************************************************************/
-static void
-hmac_md5_init_rfc2104(unsigned char *key, int key_len,
-		      struct HMACMD5Context *ctx)
-{
-	int i;
-
-	/* if key is longer than 64 bytes reset it to key=MD5(key) */
-	if (key_len > 64) {
-		unsigned char tk[16];
-		struct MD5Context tctx;
-
-		cifs_MD5_init(&tctx);
-		cifs_MD5_update(&tctx, key, key_len);
-		cifs_MD5_final(tk, &tctx);
-
-		key = tk;
-		key_len = 16;
-	}
-
-	/* start out by storing key in pads */
-	memset(ctx->k_ipad, 0, sizeof(ctx->k_ipad));
-	memset(ctx->k_opad, 0, sizeof(ctx->k_opad));
-	memcpy(ctx->k_ipad, key, key_len);
-	memcpy(ctx->k_opad, key, key_len);
-
-	/* XOR key with ipad and opad values */
-	for (i = 0; i < 64; i++) {
-		ctx->k_ipad[i] ^= 0x36;
-		ctx->k_opad[i] ^= 0x5c;
-	}
-
-	cifs_MD5_init(&ctx->ctx);
-	cifs_MD5_update(&ctx->ctx, ctx->k_ipad, 64);
-}
-#endif
-
-/***********************************************************************
- the microsoft version of hmac_md5 initialisation.
-***********************************************************************/
-void
-hmac_md5_init_limK_to_64(const unsigned char *key, int key_len,
-			 struct HMACMD5Context *ctx)
-{
-	int i;
-
-	/* if key is longer than 64 bytes truncate it */
-	if (key_len > 64)
-		key_len = 64;
-
-	/* start out by storing key in pads */
-	memset(ctx->k_ipad, 0, sizeof(ctx->k_ipad));
-	memset(ctx->k_opad, 0, sizeof(ctx->k_opad));
-	memcpy(ctx->k_ipad, key, key_len);
-	memcpy(ctx->k_opad, key, key_len);
-
-	/* XOR key with ipad and opad values */
-	for (i = 0; i < 64; i++) {
-		ctx->k_ipad[i] ^= 0x36;
-		ctx->k_opad[i] ^= 0x5c;
-	}
-
-	cifs_MD5_init(&ctx->ctx);
-	cifs_MD5_update(&ctx->ctx, ctx->k_ipad, 64);
-}
-
-/***********************************************************************
- update hmac_md5 "inner" buffer
-***********************************************************************/
-void
-hmac_md5_update(const unsigned char *text, int text_len,
-		struct HMACMD5Context *ctx)
-{
-	cifs_MD5_update(&ctx->ctx, text, text_len);	/* then text of datagram */
-}
-
-/***********************************************************************
- finish off hmac_md5 "inner" buffer and generate outer one.
-***********************************************************************/
-void
-hmac_md5_final(unsigned char *digest, struct HMACMD5Context *ctx)
-{
-	struct MD5Context ctx_o;
-
-	cifs_MD5_final(digest, &ctx->ctx);
-
-	cifs_MD5_init(&ctx_o);
-	cifs_MD5_update(&ctx_o, ctx->k_opad, 64);
-	cifs_MD5_update(&ctx_o, digest, 16);
-	cifs_MD5_final(digest, &ctx_o);
-}
-
-/***********************************************************
- single function to calculate an HMAC MD5 digest from data.
- use the microsoft hmacmd5 init method because the key is 16 bytes.
-************************************************************/
-#if 0 /* currently unused */
-static void
-hmac_md5(unsigned char key[16], unsigned char *data, int data_len,
-	 unsigned char *digest)
-{
-	struct HMACMD5Context ctx;
-	hmac_md5_init_limK_to_64(key, 16, &ctx);
-	if (data_len != 0)
-		hmac_md5_update(data, data_len, &ctx);
-
-	hmac_md5_final(digest, &ctx);
-}
-#endif
diff --git a/fs/cifs/md5.h b/fs/cifs/md5.h
deleted file mode 100644
index 6fba8cb..0000000
--- a/fs/cifs/md5.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef MD5_H
-#define MD5_H
-#ifndef HEADER_MD5_H
-/* Try to avoid clashes with OpenSSL */
-#define HEADER_MD5_H
-#endif
-
-struct MD5Context {
-	__u32 buf[4];
-	__u32 bits[2];
-	unsigned char in[64];
-};
-#endif				/* !MD5_H */
-
-#ifndef _HMAC_MD5_H
-struct HMACMD5Context {
-	struct MD5Context ctx;
-	unsigned char k_ipad[65];
-	unsigned char k_opad[65];
-};
-#endif				/* _HMAC_MD5_H */
-
-void cifs_MD5_init(struct MD5Context *context);
-void cifs_MD5_update(struct MD5Context *context, unsigned char const *buf,
-			unsigned len);
-void cifs_MD5_final(unsigned char digest[16], struct MD5Context *context);
-
-/* The following definitions come from lib/hmacmd5.c  */
-
-/* void hmac_md5_init_rfc2104(unsigned char *key, int key_len,
-			struct HMACMD5Context *ctx);*/
-void hmac_md5_init_limK_to_64(const unsigned char *key, int key_len,
-			struct HMACMD5Context *ctx);
-void hmac_md5_update(const unsigned char *text, int text_len,
-			struct HMACMD5Context *ctx);
-void hmac_md5_final(unsigned char *digest, struct HMACMD5Context *ctx);
-/* void hmac_md5(unsigned char key[16], unsigned char *data, int data_len,
-			unsigned char *digest);*/
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 43f1028..2a930a7 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -236,10 +236,7 @@
 {
 	__u16 mid = 0;
 	__u16 last_mid;
-	int   collision;
-
-	if (server == NULL)
-		return mid;
+	bool collision;
 
 	spin_lock(&GlobalMid_Lock);
 	last_mid = server->CurrentMid; /* we do not want to loop forever */
@@ -252,24 +249,38 @@
 	(and it would also have to have been a request that
 	 did not time out) */
 	while (server->CurrentMid != last_mid) {
-		struct list_head *tmp;
 		struct mid_q_entry *mid_entry;
+		unsigned int num_mids;
 
-		collision = 0;
+		collision = false;
 		if (server->CurrentMid == 0)
 			server->CurrentMid++;
 
-		list_for_each(tmp, &server->pending_mid_q) {
-			mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-
-			if ((mid_entry->mid == server->CurrentMid) &&
-			    (mid_entry->midState == MID_REQUEST_SUBMITTED)) {
+		num_mids = 0;
+		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
+			++num_mids;
+			if (mid_entry->mid == server->CurrentMid &&
+			    mid_entry->midState == MID_REQUEST_SUBMITTED) {
 				/* This mid is in use, try a different one */
-				collision = 1;
+				collision = true;
 				break;
 			}
 		}
-		if (collision == 0) {
+
+		/*
+		 * if we have more than 32k mids in the list, then something
+		 * is very wrong. Possibly a local user is trying to DoS the
+		 * box by issuing long-running calls and SIGKILL'ing them. If
+		 * we get to 2^16 mids then we're in big trouble as this
+		 * function could loop forever.
+		 *
+		 * Go ahead and assign out the mid in this situation, but force
+		 * an eventual reconnect to clean out the pending_mid_q.
+		 */
+		if (num_mids > 32768)
+			server->tcpStatus = CifsNeedReconnect;
+
+		if (!collision) {
 			mid = server->CurrentMid;
 			break;
 		}
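The reworked mid allocator above scans the pending queue for a free 16-bit mid and flags the session for reconnect if the queue ever holds more than 32768 entries, since at 2^16 pending mids the search could spin forever. The core of the search, sketched in user space with an array standing in for the pending list (names are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch: pick the next 16-bit id not present among the pending ones. */
static uint16_t next_mid(uint16_t cur, const uint16_t *pending, size_t npending)
{
	uint16_t candidate = cur;
	unsigned int tries;

	for (tries = 0; tries < 65535; tries++) {
		bool collision = false;
		size_t i;

		if (++candidate == 0)	/* mid 0 is reserved, skip it */
			candidate = 1;
		for (i = 0; i < npending; i++) {
			if (pending[i] == candidate) {
				collision = true;
				break;
			}
		}
		if (!collision)
			return candidate;
	}
	return cur;	/* no free mid; caller must recover (the patch reconnects) */
}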
@@ -381,29 +392,31 @@
 }
 
 static int
-checkSMBhdr(struct smb_hdr *smb, __u16 mid)
+check_smb_hdr(struct smb_hdr *smb, __u16 mid)
 {
-	/* Make sure that this really is an SMB, that it is a response,
-	   and that the message ids match */
-	if ((*(__le32 *) smb->Protocol == cpu_to_le32(0x424d53ff)) &&
-		(mid == smb->Mid)) {
-		if (smb->Flags & SMBFLG_RESPONSE)
-			return 0;
-		else {
-		/* only one valid case where server sends us request */
-			if (smb->Command == SMB_COM_LOCKING_ANDX)
-				return 0;
-			else
-				cERROR(1, "Received Request not response");
-		}
-	} else { /* bad signature or mid */
-		if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff))
-			cERROR(1, "Bad protocol string signature header %x",
-				*(unsigned int *) smb->Protocol);
-		if (mid != smb->Mid)
-			cERROR(1, "Mids do not match");
+	/* does it have the right SMB "signature" ? */
+	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
+		cERROR(1, "Bad protocol string signature header 0x%x",
+			*(unsigned int *)smb->Protocol);
+		return 1;
 	}
-	cERROR(1, "bad smb detected. The Mid=%d", smb->Mid);
+
+	/* Make sure that message ids match */
+	if (mid != smb->Mid) {
+		cERROR(1, "Mids do not match. received=%u expected=%u",
+			smb->Mid, mid);
+		return 1;
+	}
+
+	/* if it's a response then accept */
+	if (smb->Flags & SMBFLG_RESPONSE)
+		return 0;
+
+	/* only one valid case where server sends us request */
+	if (smb->Command == SMB_COM_LOCKING_ANDX)
+		return 0;
+
+	cERROR(1, "Server sent request, not response. mid=%u", smb->Mid);
 	return 1;
 }
 
@@ -448,7 +461,7 @@
 		return 1;
 	}
 
-	if (checkSMBhdr(smb, mid))
+	if (check_smb_hdr(smb, mid))
 		return 1;
 	clc_len = smbCalcSize_LE(smb);
 
@@ -465,25 +478,26 @@
 			if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
 				return 0; /* bcc wrapped */
 		}
-		cFYI(1, "Calculated size %d vs length %d mismatch for mid %d",
+		cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u",
 				clc_len, 4 + len, smb->Mid);
-		/* Windows XP can return a few bytes too much, presumably
-		an illegal pad, at the end of byte range lock responses
-		so we allow for that three byte pad, as long as actual
-		received length is as long or longer than calculated length */
-		/* We have now had to extend this more, since there is a
-		case in which it needs to be bigger still to handle a
-		malformed response to transact2 findfirst from WinXP when
-		access denied is returned and thus bcc and wct are zero
-		but server says length is 0x21 bytes too long as if the server
-		forget to reset the smb rfc1001 length when it reset the
-		wct and bcc to minimum size and drop the t2 parms and data */
-		if ((4+len > clc_len) && (len <= clc_len + 512))
-			return 0;
-		else {
-			cERROR(1, "RFC1001 size %d bigger than SMB for Mid=%d",
+
+		if (4 + len < clc_len) {
+			cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u",
 					len, smb->Mid);
 			return 1;
+		} else if (len > clc_len + 512) {
+			/*
+			 * Some servers (Windows XP in particular) send more
+			 * data than the lengths in the SMB packet would
+			 * indicate on certain calls (byte range locks and
+			 * trans2 find first calls in particular). While the
+			 * client can handle such a frame by ignoring the
+			 * trailing data, we choose to limit the amount of extra
+			 * data to 512 bytes.
+			 */
+			cERROR(1, "RFC1001 size %u more than 512 bytes larger "
+				  "than SMB for mid=%u", len, smb->Mid);
+			return 1;
 		}
 	}
 	return 0;
@@ -571,7 +585,7 @@
 				pCifsInode = CIFS_I(netfile->dentry->d_inode);
 
 				cifs_set_oplock_level(pCifsInode,
-						      pSMB->OplockLevel);
+					pSMB->OplockLevel ? OPLOCK_READ : 0);
 				/*
 				 * cifs_oplock_break_put() can't be called
 				 * from here.  Get reference after queueing
@@ -637,77 +651,6 @@
 	return;
 }
 
-/* Convert 16 bit Unicode pathname to wire format from string in current code
-   page.  Conversion may involve remapping up the seven characters that are
-   only legal in POSIX-like OS (if they are present in the string). Path
-   names are little endian 16 bit Unicode on the wire */
-int
-cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
-		 const struct nls_table *cp, int mapChars)
-{
-	int i, j, charlen;
-	int len_remaining = maxlen;
-	char src_char;
-	__u16 temp;
-
-	if (!mapChars)
-		return cifs_strtoUCS(target, source, PATH_MAX, cp);
-
-	for (i = 0, j = 0; i < maxlen; j++) {
-		src_char = source[i];
-		switch (src_char) {
-			case 0:
-				target[j] = 0;
-				goto ctoUCS_out;
-			case ':':
-				target[j] = cpu_to_le16(UNI_COLON);
-				break;
-			case '*':
-				target[j] = cpu_to_le16(UNI_ASTERIK);
-				break;
-			case '?':
-				target[j] = cpu_to_le16(UNI_QUESTION);
-				break;
-			case '<':
-				target[j] = cpu_to_le16(UNI_LESSTHAN);
-				break;
-			case '>':
-				target[j] = cpu_to_le16(UNI_GRTRTHAN);
-				break;
-			case '|':
-				target[j] = cpu_to_le16(UNI_PIPE);
-				break;
-			/* BB We can not handle remapping slash until
-			   all the calls to build_path_from_dentry
-			   are modified, as they use slash as separator BB */
-			/* case '\\':
-				target[j] = cpu_to_le16(UNI_SLASH);
-				break;*/
-			default:
-				charlen = cp->char2uni(source+i,
-					len_remaining, &temp);
-				/* if no match, use question mark, which
-				at least in some cases servers as wild card */
-				if (charlen < 1) {
-					target[j] = cpu_to_le16(0x003f);
-					charlen = 1;
-				} else
-					target[j] = cpu_to_le16(temp);
-				len_remaining -= charlen;
-				/* character may take more than one byte in the
-				   the source string, but will take exactly two
-				   bytes in the target string */
-				i += charlen;
-				continue;
-		}
-		i++; /* move to next char in source string */
-		len_remaining--;
-	}
-
-ctoUCS_out:
-	return i;
-}
-
 void
 cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
 {
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 6783ce6..8d9189f 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -916,14 +916,14 @@
 smbCalcSize(struct smb_hdr *ptr)
 {
 	return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) +
-		2 /* size of the bcc field */ + BCC(ptr));
+		2 /* size of the bcc field */ + get_bcc(ptr));
 }
 
 unsigned int
 smbCalcSize_LE(struct smb_hdr *ptr)
 {
 	return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) +
-		2 /* size of the bcc field */ + le16_to_cpu(BCC_LE(ptr)));
+		2 /* size of the bcc field */ + get_bcc_le(ptr));
 }
 
 /* The following are taken from fs/ntfs/util.c */
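[Editor's note: smbCalcSize()/smbCalcSize_LE() above now use the get_bcc()/get_bcc_le() accessors instead of the old BCC()/BCC_LE() macros. The quantity being computed is: fixed header + 2 bytes per parameter word (WordCount) + the 2-byte byte-count field itself + the byte count. A hedged userspace sketch of the little-endian accessor and the size formula; the struct layout is simplified and not the real struct smb_hdr.]

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for struct smb_hdr; the real layout differs */
struct toy_smb_hdr {
	uint8_t protocol[4];
	uint8_t word_count;     /* number of 16-bit parameter words */
	/* word_count * 2 parameter bytes follow, then a 16-bit
	 * little-endian byte count, then that many data bytes */
};

/* read a 16-bit little-endian value without assuming host endianness */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

static unsigned int smb_calc_size(const uint8_t *buf)
{
	const struct toy_smb_hdr *hdr = (const void *)buf;
	const uint8_t *bcc_field = buf + sizeof(*hdr) + 2 * hdr->word_count;

	return sizeof(*hdr)               /* fixed header         */
	       + 2 * hdr->word_count      /* parameter words      */
	       + 2                        /* the bcc field itself */
	       + get_le16(bcc_field);     /* data bytes           */
}

int main(void)
{
	uint8_t buf[32] = { 0xff, 'S', 'M', 'B', 2,   /* 2 param words */
			    0, 0, 0, 0,                /* parameters   */
			    3, 0,                      /* bcc = 3 (LE) */
			    'a', 'b', 'c' };
	printf("total = %u\n", smb_calc_size(buf));    /* 5 + 4 + 2 + 3 = 14 */
	return 0;
}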
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 7f25cc3..f8e4cd2 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -764,7 +764,6 @@
 {
 	int rc = 0;
 	int xid, i;
-	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
 	struct cifsFileInfo *cifsFile = NULL;
 	char *current_entry;
@@ -775,8 +774,6 @@
 
 	xid = GetXid();
 
-	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
-
 	/*
 	 * Ensure FindFirst doesn't fail before doing filldir() for '.' and
 	 * '..'. Otherwise we won't be able to notify VFS in case of failure.
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index eb74648..1adc962 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -277,7 +277,7 @@
 }
 
 static void
-decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
+decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses,
 		      const struct nls_table *nls_cp)
 {
 	int len;
@@ -323,7 +323,7 @@
 	return;
 }
 
-static int decode_ascii_ssetup(char **pbcc_area, int bleft,
+static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
 			       struct cifsSesInfo *ses,
 			       const struct nls_table *nls_cp)
 {
@@ -575,12 +575,11 @@
 	char *str_area;
 	SESSION_SETUP_ANDX *pSMB;
 	__u32 capabilities;
-	int count;
+	__u16 count;
 	int resp_buf_type;
 	struct kvec iov[3];
 	enum securityEnum type;
-	__u16 action;
-	int bytes_remaining;
+	__u16 action, bytes_remaining;
 	struct key *spnego_key = NULL;
 	__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
 	u16 blob_len;
@@ -876,10 +875,10 @@
 	count = iov[1].iov_len + iov[2].iov_len;
 	smb_buf->smb_buf_length += count;
 
-	BCC_LE(smb_buf) = cpu_to_le16(count);
+	put_bcc_le(count, smb_buf);
 
 	rc = SendReceive2(xid, ses, iov, 3 /* num_iovecs */, &resp_buf_type,
-			  CIFS_STD_OP /* not long */ | CIFS_LOG_ERROR);
+			  CIFS_LOG_ERROR);
 	/* SMB request buf freed in SendReceive2 */
 
 	pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base;
@@ -910,7 +909,7 @@
 	cFYI(1, "UID = %d ", ses->Suid);
 	/* response can have either 3 or 4 word count - Samba sends 3 */
 	/* and lanman response is 3 */
-	bytes_remaining = BCC(smb_buf);
+	bytes_remaining = get_bcc(smb_buf);
 	bcc_ptr = pByteArea(smb_buf);
 
 	if (smb_buf->WordCount == 4) {
diff --git a/fs/cifs/smbdes.c b/fs/cifs/smbdes.c
index b6b6dcb..0472148 100644
--- a/fs/cifs/smbdes.c
+++ b/fs/cifs/smbdes.c
@@ -45,7 +45,6 @@
    up with a different answer to the one above)
 */
 #include <linux/slab.h>
-#include "cifsencrypt.h"
 #define uchar unsigned char
 
 static uchar perm1[56] = { 57, 49, 41, 33, 25, 17, 9,
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 192ea51..b5041c8 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -32,9 +32,8 @@
 #include "cifs_unicode.h"
 #include "cifspdu.h"
 #include "cifsglob.h"
-#include "md5.h"
 #include "cifs_debug.h"
-#include "cifsencrypt.h"
+#include "cifsproto.h"
 
 #ifndef false
 #define false 0
@@ -48,14 +47,58 @@
 #define SSVALX(buf,pos,val) (CVAL(buf,pos)=(val)&0xFF,CVAL(buf,pos+1)=(val)>>8)
 #define SSVAL(buf,pos,val) SSVALX((buf),(pos),((__u16)(val)))
 
-/*The following definitions come from  libsmb/smbencrypt.c  */
+/* produce a md4 message digest from data of length n bytes */
+int
+mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
+{
+	int rc;
+	unsigned int size;
+	struct crypto_shash *md4;
+	struct sdesc *sdescmd4;
 
-void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
-		unsigned char *p24);
-void E_md4hash(const unsigned char *passwd, unsigned char *p16);
-static void SMBOWFencrypt(unsigned char passwd[16], const unsigned char *c8,
-		   unsigned char p24[24]);
-void SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24);
+	md4 = crypto_alloc_shash("md4", 0, 0);
+	if (IS_ERR(md4)) {
+		rc = PTR_ERR(md4);
+		cERROR(1, "%s: Crypto md4 allocation error %d\n", __func__, rc);
+		return rc;
+	}
+	size = sizeof(struct shash_desc) + crypto_shash_descsize(md4);
+	sdescmd4 = kmalloc(size, GFP_KERNEL);
+	if (!sdescmd4) {
+		rc = -ENOMEM;
+		cERROR(1, "%s: Memory allocation failure\n", __func__);
+		goto mdfour_err;
+	}
+	sdescmd4->shash.tfm = md4;
+	sdescmd4->shash.flags = 0x0;
+
+	rc = crypto_shash_init(&sdescmd4->shash);
+	if (rc) {
+		cERROR(1, "%s: Could not init md4 shash\n", __func__);
+		goto mdfour_err;
+	}
+	crypto_shash_update(&sdescmd4->shash, link_str, link_len);
+	rc = crypto_shash_final(&sdescmd4->shash, md4_hash);
+
+mdfour_err:
+	crypto_free_shash(md4);
+	kfree(sdescmd4);
+
+	return rc;
+}
+
+/* Does the des encryption from the NT or LM MD4 hash. */
+static void
+SMBOWFencrypt(unsigned char passwd[16], const unsigned char *c8,
+	      unsigned char p24[24])
+{
+	unsigned char p21[21];
+
+	memset(p21, '\0', 21);
+
+	memcpy(p21, passwd, 16);
+	E_P24(p21, c8, p24);
+}
 
 /*
    This implements the X/Open SMB password encryption
@@ -118,9 +161,10 @@
  * Creates the MD4 Hash of the users password in NT UNICODE.
  */
 
-void
+int
 E_md4hash(const unsigned char *passwd, unsigned char *p16)
 {
+	int rc;
 	int len;
 	__u16 wpwd[129];
 
@@ -139,8 +183,10 @@
 	/* Calculate length in bytes */
 	len = _my_wcslen(wpwd) * sizeof(__u16);
 
-	mdfour(p16, (unsigned char *) wpwd, len);
+	rc = mdfour(p16, (unsigned char *) wpwd, len);
 	memset(wpwd, 0, 129 * 2);
+
+	return rc;
 }
 
 #if 0 /* currently unused */
@@ -212,19 +258,6 @@
 }
 #endif
 
-/* Does the des encryption from the NT or LM MD4 hash. */
-static void
-SMBOWFencrypt(unsigned char passwd[16], const unsigned char *c8,
-	      unsigned char p24[24])
-{
-	unsigned char p21[21];
-
-	memset(p21, '\0', 21);
-
-	memcpy(p21, passwd, 16);
-	E_P24(p21, c8, p24);
-}
-
 /* Does the des encryption from the FIRST 8 BYTES of the NT or LM MD4 hash. */
 #if 0 /* currently unused */
 static void
@@ -242,16 +275,21 @@
 #endif
 
 /* Does the NT MD4 hash then des encryption. */
-
-void
+int
 SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24)
 {
+	int rc;
 	unsigned char p21[21];
 
 	memset(p21, '\0', 21);
 
-	E_md4hash(passwd, p21);
+	rc = E_md4hash(passwd, p21);
+	if (rc) {
+		cFYI(1, "%s Can't generate NT hash, error: %d", __func__, rc);
+		return rc;
+	}
 	SMBOWFencrypt(p21, c8, p24);
+	return rc;
 }
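[Editor's note: mdfour() above allocates a variable-sized shash descriptor (sizeof(struct shash_desc) + crypto_shash_descsize(md4)) and the error code is now propagated up through E_md4hash() and SMBNTencrypt(). The kernel crypto API cannot be exercised from userspace, so this is only a sketch of the same two ideas under made-up names: a runtime-sized descriptor carried in a flexible array member, and rc propagation through the call chain. No real hashing is performed.]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* analogue of struct sdesc: fixed state plus backend-sized scratch area */
struct toy_desc {
	size_t scratch_len;
	unsigned char scratch[];   /* flexible array member, sized at runtime */
};

static struct toy_desc *toy_desc_alloc(size_t backend_size)
{
	struct toy_desc *d = malloc(sizeof(*d) + backend_size);

	if (!d)
		return NULL;
	d->scratch_len = backend_size;
	memset(d->scratch, 0, backend_size);
	return d;
}

/* stand-in for mdfour(): a xor fold, not MD4, with rc-style returns */
static int toy_digest(unsigned char out[16], const unsigned char *in, size_t len)
{
	/* 64 stands in for crypto_shash_descsize(md4) */
	struct toy_desc *d = toy_desc_alloc(64);
	size_t i;

	if (!d)
		return -1;              /* kernel code returns -ENOMEM here */

	/* accumulate into the runtime-sized state, then copy the result out */
	for (i = 0; i < len; i++)
		d->scratch[i % 16] ^= in[i];
	memcpy(out, d->scratch, 16);

	free(d);
	return 0;
}

/* stand-in for SMBNTencrypt(): check the hash step before using its output */
static int toy_encrypt(const char *passwd, unsigned char out[16])
{
	int rc = toy_digest(out, (const unsigned char *)passwd, strlen(passwd));

	if (rc) {
		fprintf(stderr, "can't generate hash, error %d\n", rc);
		return rc;
	}
	/* ...the DES step would follow here... */
	return 0;
}

int main(void)
{
	unsigned char h[16];

	return toy_encrypt("secret", h);
}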
 
 
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 59ca81b..46d8756 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -36,7 +36,13 @@
 
 extern mempool_t *cifs_mid_poolp;
 
-static struct mid_q_entry *
+static void
+wake_up_task(struct mid_q_entry *mid)
+{
+	wake_up_process(mid->callback_data);
+}
+
+struct mid_q_entry *
 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 {
 	struct mid_q_entry *temp;
@@ -58,28 +64,28 @@
 	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
 		/* when mid allocated can be before when sent */
 		temp->when_alloc = jiffies;
-		temp->tsk = current;
+
+		/*
+		 * The default is for the mid to be synchronous, so the
+		 * default callback just wakes up the current task.
+		 */
+		temp->callback = wake_up_task;
+		temp->callback_data = current;
 	}
 
-	spin_lock(&GlobalMid_Lock);
-	list_add_tail(&temp->qhead, &server->pending_mid_q);
 	atomic_inc(&midCount);
 	temp->midState = MID_REQUEST_ALLOCATED;
-	spin_unlock(&GlobalMid_Lock);
 	return temp;
 }
 
-static void
+void
 DeleteMidQEntry(struct mid_q_entry *midEntry)
 {
 #ifdef CONFIG_CIFS_STATS2
 	unsigned long now;
 #endif
-	spin_lock(&GlobalMid_Lock);
 	midEntry->midState = MID_FREE;
-	list_del(&midEntry->qhead);
 	atomic_dec(&midCount);
-	spin_unlock(&GlobalMid_Lock);
 	if (midEntry->largeBuf)
 		cifs_buf_release(midEntry->resp_buf);
 	else
@@ -103,6 +109,16 @@
 	mempool_free(midEntry, cifs_mid_poolp);
 }
 
+static void
+delete_mid(struct mid_q_entry *mid)
+{
+	spin_lock(&GlobalMid_Lock);
+	list_del(&mid->qhead);
+	spin_unlock(&GlobalMid_Lock);
+
+	DeleteMidQEntry(mid);
+}
+
 static int
 smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 {
@@ -220,9 +236,9 @@
 		server->tcpStatus = CifsNeedReconnect;
 	}
 
-	if (rc < 0) {
+	if (rc < 0 && rc != -EINTR)
 		cERROR(1, "Error %d sending data on socket to server", rc);
-	} else
+	else
 		rc = 0;
 
 	/* Don't want to modify the buffer as a
@@ -244,31 +260,31 @@
 	return smb_sendv(server, &iov, 1);
 }
 
-static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
+static int wait_for_free_request(struct TCP_Server_Info *server,
+				 const int long_op)
 {
 	if (long_op == CIFS_ASYNC_OP) {
 		/* oplock breaks must not be held up */
-		atomic_inc(&ses->server->inFlight);
+		atomic_inc(&server->inFlight);
 		return 0;
 	}
 
 	spin_lock(&GlobalMid_Lock);
 	while (1) {
-		if (atomic_read(&ses->server->inFlight) >=
-				cifs_max_pending){
+		if (atomic_read(&server->inFlight) >= cifs_max_pending) {
 			spin_unlock(&GlobalMid_Lock);
 #ifdef CONFIG_CIFS_STATS2
-			atomic_inc(&ses->server->num_waiters);
+			atomic_inc(&server->num_waiters);
 #endif
-			wait_event(ses->server->request_q,
-				   atomic_read(&ses->server->inFlight)
+			wait_event(server->request_q,
+				   atomic_read(&server->inFlight)
 				     < cifs_max_pending);
 #ifdef CONFIG_CIFS_STATS2
-			atomic_dec(&ses->server->num_waiters);
+			atomic_dec(&server->num_waiters);
 #endif
 			spin_lock(&GlobalMid_Lock);
 		} else {
-			if (ses->server->tcpStatus == CifsExiting) {
+			if (server->tcpStatus == CifsExiting) {
 				spin_unlock(&GlobalMid_Lock);
 				return -ENOENT;
 			}
@@ -278,7 +294,7 @@
 
 			/* update # of requests on the wire to server */
 			if (long_op != CIFS_BLOCKING_OP)
-				atomic_inc(&ses->server->inFlight);
+				atomic_inc(&server->inFlight);
 			spin_unlock(&GlobalMid_Lock);
 			break;
 		}
@@ -308,55 +324,87 @@
 	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
 	if (*ppmidQ == NULL)
 		return -ENOMEM;
+	spin_lock(&GlobalMid_Lock);
+	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
+	spin_unlock(&GlobalMid_Lock);
 	return 0;
 }
 
-static int wait_for_response(struct cifsSesInfo *ses,
-			struct mid_q_entry *midQ,
-			unsigned long timeout,
-			unsigned long time_to_wait)
+static int
+wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
 {
-	unsigned long curr_timeout;
+	int error;
 
-	for (;;) {
-		curr_timeout = timeout + jiffies;
-		wait_event_timeout(ses->server->response_q,
-			midQ->midState != MID_REQUEST_SUBMITTED, timeout);
+	error = wait_event_killable(server->response_q,
+				    midQ->midState != MID_REQUEST_SUBMITTED);
+	if (error < 0)
+		return -ERESTARTSYS;
 
-		if (time_after(jiffies, curr_timeout) &&
-			(midQ->midState == MID_REQUEST_SUBMITTED) &&
-			((ses->server->tcpStatus == CifsGood) ||
-			 (ses->server->tcpStatus == CifsNew))) {
-
-			unsigned long lrt;
-
-			/* We timed out. Is the server still
-			   sending replies ? */
-			spin_lock(&GlobalMid_Lock);
-			lrt = ses->server->lstrp;
-			spin_unlock(&GlobalMid_Lock);
-
-			/* Calculate time_to_wait past last receive time.
-			 Although we prefer not to time out if the
-			 server is still responding - we will time
-			 out if the server takes more than 15 (or 45
-			 or 180) seconds to respond to this request
-			 and has not responded to any request from
-			 other threads on the client within 10 seconds */
-			lrt += time_to_wait;
-			if (time_after(jiffies, lrt)) {
-				/* No replies for time_to_wait. */
-				cERROR(1, "server not responding");
-				return -1;
-			}
-		} else {
-			return 0;
-		}
-	}
+	return 0;
 }
 
 
 /*
+ * Send an SMB request and set the callback function in the mid to handle
+ * the result. Caller is responsible for dealing with timeouts.
+ */
+int
+cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
+		mid_callback_t *callback, void *cbdata)
+{
+	int rc;
+	struct mid_q_entry *mid;
+
+	rc = wait_for_free_request(server, CIFS_ASYNC_OP);
+	if (rc)
+		return rc;
+
+	/* enable signing if server requires it */
+	if (server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+		in_buf->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
+	mutex_lock(&server->srv_mutex);
+	mid = AllocMidQEntry(in_buf, server);
+	if (mid == NULL) {
+		mutex_unlock(&server->srv_mutex);
+		return -ENOMEM;
+	}
+
+	/* put it on the pending_mid_q */
+	spin_lock(&GlobalMid_Lock);
+	list_add_tail(&mid->qhead, &server->pending_mid_q);
+	spin_unlock(&GlobalMid_Lock);
+
+	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+	if (rc) {
+		mutex_unlock(&server->srv_mutex);
+		goto out_err;
+	}
+
+	mid->callback = callback;
+	mid->callback_data = cbdata;
+	mid->midState = MID_REQUEST_SUBMITTED;
+#ifdef CONFIG_CIFS_STATS2
+	atomic_inc(&server->inSend);
+#endif
+	rc = smb_send(server, in_buf, in_buf->smb_buf_length);
+#ifdef CONFIG_CIFS_STATS2
+	atomic_dec(&server->inSend);
+	mid->when_sent = jiffies;
+#endif
+	mutex_unlock(&server->srv_mutex);
+	if (rc)
+		goto out_err;
+
+	return rc;
+out_err:
+	delete_mid(mid);
+	atomic_dec(&server->inFlight);
+	wake_up(&server->request_q);
+	return rc;
+}
+
+/*
  *
  * Send an SMB Request.  No response info (other than return code)
  * needs to be parsed.
@@ -382,6 +430,84 @@
 	return rc;
 }
 
+static int
+sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
+{
+	int rc = 0;
+
+	cFYI(1, "%s: cmd=%d mid=%d state=%d", __func__, mid->command,
+		mid->mid, mid->midState);
+
+	spin_lock(&GlobalMid_Lock);
+	/* ensure that it's no longer on the pending_mid_q */
+	list_del_init(&mid->qhead);
+
+	switch (mid->midState) {
+	case MID_RESPONSE_RECEIVED:
+		spin_unlock(&GlobalMid_Lock);
+		return rc;
+	case MID_REQUEST_SUBMITTED:
+		/* socket is going down, reject all calls */
+		if (server->tcpStatus == CifsExiting) {
+			cERROR(1, "%s: canceling mid=%d cmd=0x%x state=%d",
+			       __func__, mid->mid, mid->command, mid->midState);
+			rc = -EHOSTDOWN;
+			break;
+		}
+	case MID_RETRY_NEEDED:
+		rc = -EAGAIN;
+		break;
+	case MID_RESPONSE_MALFORMED:
+		rc = -EIO;
+		break;
+	default:
+		cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
+			mid->mid, mid->midState);
+		rc = -EIO;
+	}
+	spin_unlock(&GlobalMid_Lock);
+
+	DeleteMidQEntry(mid);
+	return rc;
+}
+
+/*
+ * An NT cancel request header looks just like the original request except:
+ *
+ * The Command is SMB_COM_NT_CANCEL
+ * The WordCount is zeroed out
+ * The ByteCount is zeroed out
+ *
+ * This function mangles an existing request buffer into a
+ * SMB_COM_NT_CANCEL request and then sends it.
+ */
+static int
+send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
+		struct mid_q_entry *mid)
+{
+	int rc = 0;
+
+	/* -4 for RFC1001 length and +2 for BCC field */
+	in_buf->smb_buf_length = sizeof(struct smb_hdr) - 4  + 2;
+	in_buf->Command = SMB_COM_NT_CANCEL;
+	in_buf->WordCount = 0;
+	put_bcc_le(0, in_buf);
+
+	mutex_lock(&server->srv_mutex);
+	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+	if (rc) {
+		mutex_unlock(&server->srv_mutex);
+		return rc;
+	}
+	rc = smb_send(server, in_buf, in_buf->smb_buf_length);
+	mutex_unlock(&server->srv_mutex);
+
+	cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
+		in_buf->Mid, rc);
+
+	return rc;
+}
+
 int
 SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
 	     struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
@@ -390,7 +516,6 @@
 	int rc = 0;
 	int long_op;
 	unsigned int receive_len;
-	unsigned long timeout;
 	struct mid_q_entry *midQ;
 	struct smb_hdr *in_buf = iov[0].iov_base;
 
@@ -413,7 +538,7 @@
 	   to the same server. We may make this configurable later or
 	   use ses->maxReq */
 
-	rc = wait_for_free_request(ses, long_op);
+	rc = wait_for_free_request(ses->server, long_op);
 	if (rc) {
 		cifs_small_buf_release(in_buf);
 		return rc;
@@ -452,70 +577,41 @@
 #endif
 
 	mutex_unlock(&ses->server->srv_mutex);
-	cifs_small_buf_release(in_buf);
 
-	if (rc < 0)
-		goto out;
-
-	if (long_op == CIFS_STD_OP)
-		timeout = 15 * HZ;
-	else if (long_op == CIFS_VLONG_OP) /* e.g. slow writes past EOF */
-		timeout = 180 * HZ;
-	else if (long_op == CIFS_LONG_OP)
-		timeout = 45 * HZ; /* should be greater than
-			servers oplock break timeout (about 43 seconds) */
-	else if (long_op == CIFS_ASYNC_OP)
-		goto out;
-	else if (long_op == CIFS_BLOCKING_OP)
-		timeout = 0x7FFFFFFF; /*  large, but not so large as to wrap */
-	else {
-		cERROR(1, "unknown timeout flag %d", long_op);
-		rc = -EIO;
+	if (rc < 0) {
+		cifs_small_buf_release(in_buf);
 		goto out;
 	}
 
-	/* wait for 15 seconds or until woken up due to response arriving or
-	   due to last connection to this server being unmounted */
-	if (signal_pending(current)) {
-		/* if signal pending do not hold up user for full smb timeout
-		but we still give response a chance to complete */
-		timeout = 2 * HZ;
+	if (long_op == CIFS_ASYNC_OP) {
+		cifs_small_buf_release(in_buf);
+		goto out;
 	}
 
-	/* No user interrupts in wait - wreaks havoc with performance */
-	wait_for_response(ses, midQ, timeout, 10 * HZ);
-
-	spin_lock(&GlobalMid_Lock);
-
-	if (midQ->resp_buf == NULL) {
-		cERROR(1, "No response to cmd %d mid %d",
-			midQ->command, midQ->mid);
+	rc = wait_for_response(ses->server, midQ);
+	if (rc != 0) {
+		send_nt_cancel(ses->server, in_buf, midQ);
+		spin_lock(&GlobalMid_Lock);
 		if (midQ->midState == MID_REQUEST_SUBMITTED) {
-			if (ses->server->tcpStatus == CifsExiting)
-				rc = -EHOSTDOWN;
-			else {
-				ses->server->tcpStatus = CifsNeedReconnect;
-				midQ->midState = MID_RETRY_NEEDED;
-			}
-		}
-
-		if (rc != -EHOSTDOWN) {
-			if (midQ->midState == MID_RETRY_NEEDED) {
-				rc = -EAGAIN;
-				cFYI(1, "marking request for retry");
-			} else {
-				rc = -EIO;
-			}
+			midQ->callback = DeleteMidQEntry;
+			spin_unlock(&GlobalMid_Lock);
+			cifs_small_buf_release(in_buf);
+			atomic_dec(&ses->server->inFlight);
+			wake_up(&ses->server->request_q);
+			return rc;
 		}
 		spin_unlock(&GlobalMid_Lock);
-		DeleteMidQEntry(midQ);
-		/* Update # of requests on wire to server */
+	}
+
+	cifs_small_buf_release(in_buf);
+
+	rc = sync_mid_result(midQ, ses->server);
+	if (rc != 0) {
 		atomic_dec(&ses->server->inFlight);
 		wake_up(&ses->server->request_q);
 		return rc;
 	}
 
-	spin_unlock(&GlobalMid_Lock);
 	receive_len = midQ->resp_buf->smb_buf_length;
 
 	if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
@@ -559,19 +655,18 @@
 		if (receive_len >= sizeof(struct smb_hdr) - 4
 		    /* do not count RFC1001 header */  +
 		    (2 * midQ->resp_buf->WordCount) + 2 /* bcc */ )
-			BCC(midQ->resp_buf) =
-				le16_to_cpu(BCC_LE(midQ->resp_buf));
+			put_bcc(get_bcc_le(midQ->resp_buf), midQ->resp_buf);
 		if ((flags & CIFS_NO_RESP) == 0)
 			midQ->resp_buf = NULL;  /* mark it so buf will
 						   not be freed by
-						   DeleteMidQEntry */
+						   delete_mid */
 	} else {
 		rc = -EIO;
 		cFYI(1, "Bad MID state?");
 	}
 
 out:
-	DeleteMidQEntry(midQ);
+	delete_mid(midQ);
 	atomic_dec(&ses->server->inFlight);
 	wake_up(&ses->server->request_q);
 
@@ -585,7 +680,6 @@
 {
 	int rc = 0;
 	unsigned int receive_len;
-	unsigned long timeout;
 	struct mid_q_entry *midQ;
 
 	if (ses == NULL) {
@@ -610,7 +704,7 @@
 		return -EIO;
 	}
 
-	rc = wait_for_free_request(ses, long_op);
+	rc = wait_for_free_request(ses->server, long_op);
 	if (rc)
 		return rc;
 
@@ -649,64 +743,31 @@
 	if (rc < 0)
 		goto out;
 
-	if (long_op == CIFS_STD_OP)
-		timeout = 15 * HZ;
-	/* wait for 15 seconds or until woken up due to response arriving or
-	   due to last connection to this server being unmounted */
-	else if (long_op == CIFS_ASYNC_OP)
+	if (long_op == CIFS_ASYNC_OP)
 		goto out;
-	else if (long_op == CIFS_VLONG_OP) /* writes past EOF can be slow */
-		timeout = 180 * HZ;
-	else if (long_op == CIFS_LONG_OP)
-		timeout = 45 * HZ; /* should be greater than
-			servers oplock break timeout (about 43 seconds) */
-	else if (long_op == CIFS_BLOCKING_OP)
-		timeout = 0x7FFFFFFF; /* large but no so large as to wrap */
-	else {
-		cERROR(1, "unknown timeout flag %d", long_op);
-		rc = -EIO;
-		goto out;
-	}
 
-	if (signal_pending(current)) {
-		/* if signal pending do not hold up user for full smb timeout
-		but we still give response a chance to complete */
-		timeout = 2 * HZ;
-	}
-
-	/* No user interrupts in wait - wreaks havoc with performance */
-	wait_for_response(ses, midQ, timeout, 10 * HZ);
-
-	spin_lock(&GlobalMid_Lock);
-	if (midQ->resp_buf == NULL) {
-		cERROR(1, "No response for cmd %d mid %d",
-			  midQ->command, midQ->mid);
+	rc = wait_for_response(ses->server, midQ);
+	if (rc != 0) {
+		send_nt_cancel(ses->server, in_buf, midQ);
+		spin_lock(&GlobalMid_Lock);
 		if (midQ->midState == MID_REQUEST_SUBMITTED) {
-			if (ses->server->tcpStatus == CifsExiting)
-				rc = -EHOSTDOWN;
-			else {
-				ses->server->tcpStatus = CifsNeedReconnect;
-				midQ->midState = MID_RETRY_NEEDED;
-			}
-		}
-
-		if (rc != -EHOSTDOWN) {
-			if (midQ->midState == MID_RETRY_NEEDED) {
-				rc = -EAGAIN;
-				cFYI(1, "marking request for retry");
-			} else {
-				rc = -EIO;
-			}
+			/* no longer considered to be "in-flight" */
+			midQ->callback = DeleteMidQEntry;
+			spin_unlock(&GlobalMid_Lock);
+			atomic_dec(&ses->server->inFlight);
+			wake_up(&ses->server->request_q);
+			return rc;
 		}
 		spin_unlock(&GlobalMid_Lock);
-		DeleteMidQEntry(midQ);
-		/* Update # of requests on wire to server */
+	}
+
+	rc = sync_mid_result(midQ, ses->server);
+	if (rc != 0) {
 		atomic_dec(&ses->server->inFlight);
 		wake_up(&ses->server->request_q);
 		return rc;
 	}
 
-	spin_unlock(&GlobalMid_Lock);
 	receive_len = midQ->resp_buf->smb_buf_length;
 
 	if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
@@ -748,43 +809,20 @@
 		if (receive_len >= sizeof(struct smb_hdr) - 4
 		    /* do not count RFC1001 header */  +
 		    (2 * out_buf->WordCount) + 2 /* bcc */ )
-			BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
+			put_bcc(get_bcc_le(midQ->resp_buf), midQ->resp_buf);
 	} else {
 		rc = -EIO;
 		cERROR(1, "Bad MID state?");
 	}
 
 out:
-	DeleteMidQEntry(midQ);
+	delete_mid(midQ);
 	atomic_dec(&ses->server->inFlight);
 	wake_up(&ses->server->request_q);
 
 	return rc;
 }
 
-/* Send an NT_CANCEL SMB to cause the POSIX blocking lock to return. */
-
-static int
-send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf,
-		struct mid_q_entry *midQ)
-{
-	int rc = 0;
-	struct cifsSesInfo *ses = tcon->ses;
-	__u16 mid = in_buf->Mid;
-
-	header_assemble(in_buf, SMB_COM_NT_CANCEL, tcon, 0);
-	in_buf->Mid = mid;
-	mutex_lock(&ses->server->srv_mutex);
-	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
-	if (rc) {
-		mutex_unlock(&ses->server->srv_mutex);
-		return rc;
-	}
-	rc = smb_send(ses->server, in_buf, in_buf->smb_buf_length);
-	mutex_unlock(&ses->server->srv_mutex);
-	return rc;
-}
-
 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
    blocking lock to return. */
 
@@ -807,7 +845,7 @@
 	pSMB->hdr.Mid = GetNextMid(ses->server);
 
 	return SendReceive(xid, ses, in_buf, out_buf,
-			&bytes_returned, CIFS_STD_OP);
+			&bytes_returned, 0);
 }
 
 int
@@ -845,7 +883,7 @@
 		return -EIO;
 	}
 
-	rc = wait_for_free_request(ses, CIFS_BLOCKING_OP);
+	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
 	if (rc)
 		return rc;
 
@@ -863,7 +901,7 @@
 
 	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
 	if (rc) {
-		DeleteMidQEntry(midQ);
+		delete_mid(midQ);
 		mutex_unlock(&ses->server->srv_mutex);
 		return rc;
 	}
@@ -880,7 +918,7 @@
 	mutex_unlock(&ses->server->srv_mutex);
 
 	if (rc < 0) {
-		DeleteMidQEntry(midQ);
+		delete_mid(midQ);
 		return rc;
 	}
 
@@ -899,10 +937,9 @@
 		if (in_buf->Command == SMB_COM_TRANSACTION2) {
 			/* POSIX lock. We send a NT_CANCEL SMB to cause the
 			   blocking lock to return. */
-
-			rc = send_nt_cancel(tcon, in_buf, midQ);
+			rc = send_nt_cancel(ses->server, in_buf, midQ);
 			if (rc) {
-				DeleteMidQEntry(midQ);
+				delete_mid(midQ);
 				return rc;
 			}
 		} else {
@@ -914,47 +951,33 @@
 			/* If we get -ENOLCK back the lock may have
 			   already been removed. Don't exit in this case. */
 			if (rc && rc != -ENOLCK) {
-				DeleteMidQEntry(midQ);
+				delete_mid(midQ);
 				return rc;
 			}
 		}
 
-		/* Wait 5 seconds for the response. */
-		if (wait_for_response(ses, midQ, 5 * HZ, 5 * HZ) == 0) {
-			/* We got the response - restart system call. */
-			rstart = 1;
+		rc = wait_for_response(ses->server, midQ);
+		if (rc) {
+			send_nt_cancel(ses->server, in_buf, midQ);
+			spin_lock(&GlobalMid_Lock);
+			if (midQ->midState == MID_REQUEST_SUBMITTED) {
+				/* no longer considered to be "in-flight" */
+				midQ->callback = DeleteMidQEntry;
+				spin_unlock(&GlobalMid_Lock);
+				return rc;
+			}
+			spin_unlock(&GlobalMid_Lock);
 		}
+
+		/* We got the response - restart system call. */
+		rstart = 1;
 	}
 
-	spin_lock(&GlobalMid_Lock);
-	if (midQ->resp_buf) {
-		spin_unlock(&GlobalMid_Lock);
-		receive_len = midQ->resp_buf->smb_buf_length;
-	} else {
-		cERROR(1, "No response for cmd %d mid %d",
-			  midQ->command, midQ->mid);
-		if (midQ->midState == MID_REQUEST_SUBMITTED) {
-			if (ses->server->tcpStatus == CifsExiting)
-				rc = -EHOSTDOWN;
-			else {
-				ses->server->tcpStatus = CifsNeedReconnect;
-				midQ->midState = MID_RETRY_NEEDED;
-			}
-		}
-
-		if (rc != -EHOSTDOWN) {
-			if (midQ->midState == MID_RETRY_NEEDED) {
-				rc = -EAGAIN;
-				cFYI(1, "marking request for retry");
-			} else {
-				rc = -EIO;
-			}
-		}
-		spin_unlock(&GlobalMid_Lock);
-		DeleteMidQEntry(midQ);
+	rc = sync_mid_result(midQ, ses->server);
+	if (rc != 0)
 		return rc;
-	}
 
+	receive_len = midQ->resp_buf->smb_buf_length;
 	if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
 		cERROR(1, "Frame too large received.  Length: %d  Xid: %d",
 			receive_len, xid);
@@ -998,10 +1021,10 @@
 	if (receive_len >= sizeof(struct smb_hdr) - 4
 	    /* do not count RFC1001 header */  +
 	    (2 * out_buf->WordCount) + 2 /* bcc */ )
-		BCC(out_buf) = le16_to_cpu(BCC_LE(out_buf));
+		put_bcc(get_bcc_le(out_buf), out_buf);
 
 out:
-	DeleteMidQEntry(midQ);
+	delete_mid(midQ);
 	if (rstart && rc == -EACCES)
 		return -ERESTARTSYS;
 	return rc;
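[Editor's note: the transport.c rework above replaces fixed per-command timeouts with a killable wait and attaches a completion callback to every mid: synchronous callers get the default wake_up_task() callback, while cifs_call_async() lets the caller supply its own. A minimal pthread sketch of that callback model; all names are invented for the example and this is not the kernel API.]

#include <pthread.h>
#include <stdio.h>

/* analogue of struct mid_q_entry: result slot plus a completion callback */
struct toy_mid {
	int done;
	void (*callback)(struct toy_mid *mid);
	void *callback_data;
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

/* default callback: wake whoever submitted the request (wake_up_task()) */
static void wake_submitter(struct toy_mid *mid)
{
	pthread_mutex_lock(&mid->lock);
	mid->done = 1;
	pthread_cond_signal(&mid->cond);
	pthread_mutex_unlock(&mid->lock);
}

/* an async caller can instead do all its work in its own callback */
static void async_done(struct toy_mid *mid)
{
	printf("async request finished, data=%s\n", (char *)mid->callback_data);
}

/* "demultiplex thread": invoked when a response arrives for this mid */
static void response_arrived(struct toy_mid *mid)
{
	mid->callback(mid);
}

int main(void)
{
	struct toy_mid sync_mid = {
		.callback = wake_submitter,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};
	struct toy_mid async_mid = {
		.callback = async_done,
		.callback_data = "oplock break",
	};

	/* async path: nothing blocks, the callback does all the work */
	response_arrived(&async_mid);

	/* sync path: same mechanism, but the callback wakes the waiter */
	response_arrived(&sync_mid);
	pthread_mutex_lock(&sync_mid.lock);
	while (!sync_mid.done)
		pthread_cond_wait(&sync_mid.cond, &sync_mid.lock);
	pthread_mutex_unlock(&sync_mid.lock);
	printf("sync request finished\n");
	return 0;
}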
diff --git a/fs/dcache.c b/fs/dcache.c
index 9f493ee..2a6bd9a 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -176,6 +176,7 @@
 
 /**
  * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
+ * @dentry: the target dentry
  * After this call, in-progress rcu-walk path lookup will fail. This
  * should be called after unhashing, and after changing d_inode (if
  * the dentry has not already been unhashed).
@@ -281,6 +282,7 @@
 /**
  * d_kill - kill dentry and return parent
  * @dentry: dentry to kill
+ * @parent: parent dentry
  *
  * The dentry must already be unhashed and removed from the LRU.
  *
@@ -1973,7 +1975,7 @@
 /**
  * d_validate - verify dentry provided from insecure source (deprecated)
  * @dentry: The dentry alleged to be valid child of @dparent
- * @parent: The parent dentry (known to be valid)
+ * @dparent: The parent dentry (known to be valid)
  *
  * An insecure source has sent us a dentry, here we verify it and dget() it.
  * This is used by ncpfs in its readdir implementation.
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 85882f6..b044705 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -325,12 +325,16 @@
 }
 EXPORT_SYMBOL_GPL(dio_end_io);
 
-static int
+static void
 dio_bio_alloc(struct dio *dio, struct block_device *bdev,
 		sector_t first_sector, int nr_vecs)
 {
 	struct bio *bio;
 
+	/*
+	 * bio_alloc() is guaranteed to return a bio when called with
+	 * __GFP_WAIT and we request a valid number of vectors.
+	 */
 	bio = bio_alloc(GFP_KERNEL, nr_vecs);
 
 	bio->bi_bdev = bdev;
@@ -342,7 +346,6 @@
 
 	dio->bio = bio;
 	dio->logical_offset_in_bio = dio->cur_page_fs_offset;
-	return 0;
 }
 
 /*
@@ -583,8 +586,9 @@
 		goto out;
 	sector = start_sector << (dio->blkbits - 9);
 	nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
+	nr_pages = min(nr_pages, BIO_MAX_PAGES);
 	BUG_ON(nr_pages <= 0);
-	ret = dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
+	dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
 	dio->boundary = 0;
 out:
 	return ret;
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 9c64ae9..2d8c87b 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -1468,15 +1468,13 @@
 
 static int work_start(void)
 {
-	recv_workqueue = alloc_workqueue("dlm_recv", WQ_MEM_RECLAIM |
-					 WQ_HIGHPRI | WQ_FREEZEABLE, 0);
+	recv_workqueue = create_singlethread_workqueue("dlm_recv");
 	if (!recv_workqueue) {
 		log_print("can't start dlm_recv");
 		return -ENOMEM;
 	}
 
-	send_workqueue = alloc_workqueue("dlm_send", WQ_MEM_RECLAIM |
-					 WQ_HIGHPRI | WQ_FREEZEABLE, 0);
+	send_workqueue = create_singlethread_workqueue("dlm_send");
 	if (!send_workqueue) {
 		log_print("can't start dlm_send");
 		destroy_workqueue(recv_workqueue);
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index cc8a9b7..267d0ad 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1114,6 +1114,17 @@
 	return ep_scan_ready_list(ep, ep_send_events_proc, &esed);
 }
 
+static inline struct timespec ep_set_mstimeout(long ms)
+{
+	struct timespec now, ts = {
+		.tv_sec = ms / MSEC_PER_SEC,
+		.tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
+	};
+
+	ktime_get_ts(&now);
+	return timespec_add_safe(now, ts);
+}
+
 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		   int maxevents, long timeout)
 {
@@ -1121,12 +1132,11 @@
 	unsigned long flags;
 	long slack;
 	wait_queue_t wait;
-	struct timespec end_time;
 	ktime_t expires, *to = NULL;
 
 	if (timeout > 0) {
-		ktime_get_ts(&end_time);
-		timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC);
+		struct timespec end_time = ep_set_mstimeout(timeout);
+
 		slack = select_estimate_accuracy(&end_time);
 		to = &expires;
 		*to = timespec_to_ktime(end_time);
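[Editor's note: ep_set_mstimeout() above splits a millisecond timeout into whole seconds plus a nanosecond remainder before adding it to the current time with timespec_add_safe(). A small userspace check of the split; the constants mirror MSEC_PER_SEC/NSEC_PER_MSEC and the clamping done by timespec_add_safe() is not reproduced here.]

#include <stdio.h>
#include <time.h>

#define MSEC_PER_SEC  1000L
#define NSEC_PER_MSEC 1000000L

/* convert a relative timeout in milliseconds into a timespec delta */
static struct timespec ms_to_timespec(long ms)
{
	struct timespec ts = {
		.tv_sec  = ms / MSEC_PER_SEC,
		.tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
	};
	return ts;
}

int main(void)
{
	/* 2.5 seconds -> 2 s + 500,000,000 ns */
	struct timespec ts = ms_to_timespec(2500);

	printf("%ld s + %ld ns\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}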
diff --git a/fs/exec.c b/fs/exec.c
index c62efcb..52a447d 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -120,7 +120,7 @@
 		goto out;
 
 	file = do_filp_open(AT_FDCWD, tmp,
-				O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
+				O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0,
 				MAY_READ | MAY_EXEC | MAY_OPEN);
 	putname(tmp);
 	error = PTR_ERR(file);
@@ -723,7 +723,7 @@
 	int err;
 
 	file = do_filp_open(AT_FDCWD, name,
-				O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
+				O_LARGEFILE | O_RDONLY | __FMODE_EXEC, 0,
 				MAY_EXEC | MAY_OPEN);
 	if (IS_ERR(file))
 		goto out;
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 4268542..a755523 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -1030,7 +1030,6 @@
 		memcpy(oi->i_data, fcb.i_data, sizeof(fcb.i_data));
 	}
 
-	inode->i_mapping->backing_dev_info = sb->s_bdi;
 	if (S_ISREG(inode->i_mode)) {
 		inode->i_op = &exofs_file_inode_operations;
 		inode->i_fop = &exofs_file_operations;
@@ -1131,7 +1130,6 @@
 
 	sbi = sb->s_fs_info;
 
-	inode->i_mapping->backing_dev_info = sb->s_bdi;
 	sb->s_dirt = 1;
 	inode_init_owner(inode, dir, mode);
 	inode->i_ino = sbi->s_nextid++;
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 7aa767d..85c8cc8 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -754,7 +754,7 @@
 static int ext3_mark_dquot_dirty(struct dquot *dquot);
 static int ext3_write_info(struct super_block *sb, int type);
 static int ext3_quota_on(struct super_block *sb, int type, int format_id,
-				char *path);
+			 struct path *path);
 static int ext3_quota_on_mount(struct super_block *sb, int type);
 static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
 			       size_t len, loff_t off);
@@ -2877,27 +2877,20 @@
  * Standard function to be called on quota_on
  */
 static int ext3_quota_on(struct super_block *sb, int type, int format_id,
-			 char *name)
+			 struct path *path)
 {
 	int err;
-	struct path path;
 
 	if (!test_opt(sb, QUOTA))
 		return -EINVAL;
 
-	err = kern_path(name, LOOKUP_FOLLOW, &path);
-	if (err)
-		return err;
-
 	/* Quotafile not on the same filesystem? */
-	if (path.mnt->mnt_sb != sb) {
-		path_put(&path);
+	if (path->mnt->mnt_sb != sb)
 		return -EXDEV;
-	}
 	/* Journaling quota? */
 	if (EXT3_SB(sb)->s_qf_names[type]) {
 		/* Quotafile not of fs root? */
-		if (path.dentry->d_parent != sb->s_root)
+		if (path->dentry->d_parent != sb->s_root)
 			ext3_msg(sb, KERN_WARNING,
 				"warning: Quota file not on filesystem root. "
 				"Journaled quota will not work.");
@@ -2907,7 +2900,7 @@
 	 * When we journal data on quota file, we have to flush journal to see
 	 * all updates to the file when we bypass pagecache...
 	 */
-	if (ext3_should_journal_data(path.dentry->d_inode)) {
+	if (ext3_should_journal_data(path->dentry->d_inode)) {
 		/*
 		 * We don't need to lock updates but journal_flush() could
 		 * otherwise be livelocked...
@@ -2915,15 +2908,11 @@
 		journal_lock_updates(EXT3_SB(sb)->s_journal);
 		err = journal_flush(EXT3_SB(sb)->s_journal);
 		journal_unlock_updates(EXT3_SB(sb)->s_journal);
-		if (err) {
-			path_put(&path);
+		if (err)
 			return err;
-		}
 	}
 
-	err = dquot_quota_on_path(sb, type, format_id, &path);
-	path_put(&path);
-	return err;
+	return dquot_quota_on(sb, type, format_id, path);
 }
 
 /* Read data from quotafile - avoid pagecache and such because we cannot afford
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0c8d97b..3aa0b72 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -848,6 +848,7 @@
 	atomic_t i_ioend_count;	/* Number of outstanding io_end structs */
 	/* current io_end structure for async DIO write*/
 	ext4_io_end_t *cur_aio_dio;
+	atomic_t i_aiodio_unwritten; /* Nr. of inflight conversions pending */
 
 	spinlock_t i_block_reservation_lock;
 
@@ -2119,6 +2120,15 @@
 
 #define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
 
+/* For ioend & aio unwritten conversion wait queues */
+#define EXT4_WQ_HASH_SZ		37
+#define ext4_ioend_wq(v)   (&ext4__ioend_wq[((unsigned long)(v)) %\
+					    EXT4_WQ_HASH_SZ])
+#define ext4_aio_mutex(v)  (&ext4__aio_mutex[((unsigned long)(v)) %\
+					     EXT4_WQ_HASH_SZ])
+extern wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
+extern struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];
+
 #endif	/* __KERNEL__ */
 
 #endif	/* _EXT4_H */
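[Editor's note: the new EXT4_WQ_HASH_SZ machinery above hashes an inode pointer into one of 37 shared wait queues/mutexes, so unrelated inodes usually land in different slots without needing per-inode storage. A hedged userspace sketch of the same hashed "striping" idea; pthread mutexes stand in for the wait queues.]

#include <pthread.h>
#include <stdio.h>

#define WQ_HASH_SZ 37   /* prime, as in the patch, to spread pointers out */

static pthread_mutex_t stripe_lock[WQ_HASH_SZ];

/* map an object's address to one of the shared locks */
static pthread_mutex_t *lock_for(const void *obj)
{
	return &stripe_lock[(unsigned long)obj % WQ_HASH_SZ];
}

int main(void)
{
	int a, b, i;

	for (i = 0; i < WQ_HASH_SZ; i++)
		pthread_mutex_init(&stripe_lock[i], NULL);

	/* two different objects usually hash to different stripes */
	printf("a -> slot %lu, b -> slot %lu\n",
	       (unsigned long)&a % WQ_HASH_SZ,
	       (unsigned long)&b % WQ_HASH_SZ);

	pthread_mutex_lock(lock_for(&a));
	/* ... per-object critical section ... */
	pthread_mutex_unlock(lock_for(&a));
	return 0;
}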
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 63a7581..ccce8a7 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3174,9 +3174,10 @@
 		 * that this IO needs to convertion to written when IO is
 		 * completed
 		 */
-		if (io)
+		if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
 			io->flag = EXT4_IO_END_UNWRITTEN;
-		else
+			atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
+		} else
 			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
 		if (ext4_should_dioread_nolock(inode))
 			map->m_flags |= EXT4_MAP_UNINIT;
@@ -3463,9 +3464,10 @@
 		 * that we need to perform convertion when IO is done.
 		 */
 		if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
-			if (io)
+			if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) {
 				io->flag = EXT4_IO_END_UNWRITTEN;
-			else
+				atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
+			} else
 				ext4_set_inode_state(inode,
 						     EXT4_STATE_DIO_UNWRITTEN);
 		}
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 2e8322c..7b80d54 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -55,11 +55,47 @@
 	return 0;
 }
 
+static void ext4_aiodio_wait(struct inode *inode)
+{
+	wait_queue_head_t *wq = ext4_ioend_wq(inode);
+
+	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_aiodio_unwritten) == 0));
+}
+
+/*
+ * This tests whether the IO in question is block-aligned or not.
+ * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
+ * are converted to written only after the IO is complete.  Until they are
+ * mapped, these blocks appear as holes, so dio_zero_block() will assume that
+ * it needs to zero out portions of the start and/or end block.  If 2 AIO
+ * threads are at work on the same unwritten block, they must be synchronized
+ * or one thread will zero the other's data, causing corruption.
+ */
+static int
+ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
+		   unsigned long nr_segs, loff_t pos)
+{
+	struct super_block *sb = inode->i_sb;
+	int blockmask = sb->s_blocksize - 1;
+	size_t count = iov_length(iov, nr_segs);
+	loff_t final_size = pos + count;
+
+	if (pos >= inode->i_size)
+		return 0;
+
+	if ((pos & blockmask) || (final_size & blockmask))
+		return 1;
+
+	return 0;
+}
+
 static ssize_t
 ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
 		unsigned long nr_segs, loff_t pos)
 {
 	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
+	int unaligned_aio = 0;
+	int ret;
 
 	/*
 	 * If we have encountered a bitmap-format file, the size limit
@@ -78,9 +114,31 @@
 			nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
 					      sbi->s_bitmap_maxbytes - pos);
 		}
+	} else if (unlikely((iocb->ki_filp->f_flags & O_DIRECT) &&
+		   !is_sync_kiocb(iocb))) {
+		unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
 	}
 
-	return generic_file_aio_write(iocb, iov, nr_segs, pos);
+	/* Unaligned direct AIO must be serialized; see comment above */
+	if (unaligned_aio) {
+		static unsigned long unaligned_warn_time;
+
+		/* Warn about this once per day */
+		if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ))
+			ext4_msg(inode->i_sb, KERN_WARNING,
+				 "Unaligned AIO/DIO on inode %ld by %s; "
+				 "performance will be poor.",
+				 inode->i_ino, current->comm);
+		mutex_lock(ext4_aio_mutex(inode));
+		ext4_aiodio_wait(inode);
+	}
+
+	ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
+
+	if (unaligned_aio)
+		mutex_unlock(ext4_aio_mutex(inode));
+
+	return ret;
 }
 
 static const struct vm_operations_struct ext4_file_vm_ops = {
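[Editor's note: ext4_unaligned_aio() above treats an AIO write as "unaligned" when either the start or the end offset is not a multiple of the block size, using a simple mask (blocksize - 1, valid because the block size is a power of two). A tiny userspace illustration of the mask test.]

#include <stdio.h>

/* returns 1 when [pos, pos+count) does not start and end on block
 * boundaries; blocksize must be a power of two */
static int is_unaligned(long long pos, long long count, long blocksize)
{
	long blockmask = blocksize - 1;
	long long final = pos + count;

	return (pos & blockmask) || (final & blockmask);
}

int main(void)
{
	/* 4 KiB blocks: a 4096-byte write at offset 8192 is aligned,
	 * a 100-byte write at offset 50 is not */
	printf("%d %d\n",
	       is_unaligned(8192, 4096, 4096),   /* 0 */
	       is_unaligned(50, 100, 4096));     /* 1 */
	return 0;
}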
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 851f49b..d1fe09a 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -342,10 +342,15 @@
 /* We create slab caches for groupinfo data structures based on the
  * superblock block size.  There will be one per mounted filesystem for
  * each unique s_blocksize_bits */
-#define NR_GRPINFO_CACHES	\
-	(EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE + 1)
+#define NR_GRPINFO_CACHES 8
 static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];
 
+static const char *ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
+	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
+	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
+	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
+};
+
 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
 					ext4_group_t group);
 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
@@ -2414,6 +2419,55 @@
 	return -ENOMEM;
 }
 
+static void ext4_groupinfo_destroy_slabs(void)
+{
+	int i;
+
+	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
+		if (ext4_groupinfo_caches[i])
+			kmem_cache_destroy(ext4_groupinfo_caches[i]);
+		ext4_groupinfo_caches[i] = NULL;
+	}
+}
+
+static int ext4_groupinfo_create_slab(size_t size)
+{
+	static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
+	int slab_size;
+	int blocksize_bits = order_base_2(size);
+	int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
+	struct kmem_cache *cachep;
+
+	if (cache_index >= NR_GRPINFO_CACHES)
+		return -EINVAL;
+
+	if (unlikely(cache_index < 0))
+		cache_index = 0;
+
+	mutex_lock(&ext4_grpinfo_slab_create_mutex);
+	if (ext4_groupinfo_caches[cache_index]) {
+		mutex_unlock(&ext4_grpinfo_slab_create_mutex);
+		return 0;	/* Already created */
+	}
+
+	slab_size = offsetof(struct ext4_group_info,
+				bb_counters[blocksize_bits + 2]);
+
+	cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
+					slab_size, 0, SLAB_RECLAIM_ACCOUNT,
+					NULL);
+
+	mutex_unlock(&ext4_grpinfo_slab_create_mutex);
+	if (!cachep) {
+		printk(KERN_EMERG "EXT4: no memory for groupinfo slab cache\n");
+		return -ENOMEM;
+	}
+
+	ext4_groupinfo_caches[cache_index] = cachep;
+
+	return 0;
+}
+
 int ext4_mb_init(struct super_block *sb, int needs_recovery)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -2421,9 +2475,6 @@
 	unsigned offset;
 	unsigned max;
 	int ret;
-	int cache_index;
-	struct kmem_cache *cachep;
-	char *namep = NULL;
 
 	i = (sb->s_blocksize_bits + 2) * sizeof(*sbi->s_mb_offsets);
 
@@ -2440,30 +2491,9 @@
 		goto out;
 	}
 
-	cache_index = sb->s_blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
-	cachep = ext4_groupinfo_caches[cache_index];
-	if (!cachep) {
-		char name[32];
-		int len = offsetof(struct ext4_group_info,
-					bb_counters[sb->s_blocksize_bits + 2]);
-
-		sprintf(name, "ext4_groupinfo_%d", sb->s_blocksize_bits);
-		namep = kstrdup(name, GFP_KERNEL);
-		if (!namep) {
-			ret = -ENOMEM;
-			goto out;
-		}
-
-		/* Need to free the kmem_cache_name() when we
-		 * destroy the slab */
-		cachep = kmem_cache_create(namep, len, 0,
-					     SLAB_RECLAIM_ACCOUNT, NULL);
-		if (!cachep) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		ext4_groupinfo_caches[cache_index] = cachep;
-	}
+	ret = ext4_groupinfo_create_slab(sb->s_blocksize);
+	if (ret < 0)
+		goto out;
 
 	/* order 0 is regular bitmap */
 	sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
@@ -2520,7 +2550,6 @@
 	if (ret) {
 		kfree(sbi->s_mb_offsets);
 		kfree(sbi->s_mb_maxs);
-		kfree(namep);
 	}
 	return ret;
 }
@@ -2734,7 +2763,6 @@
 
 void ext4_exit_mballoc(void)
 {
-	int i;
 	/*
 	 * Wait for completion of call_rcu()'s on ext4_pspace_cachep
 	 * before destroying the slab cache.
@@ -2743,15 +2771,7 @@
 	kmem_cache_destroy(ext4_pspace_cachep);
 	kmem_cache_destroy(ext4_ac_cachep);
 	kmem_cache_destroy(ext4_free_ext_cachep);
-
-	for (i = 0; i < NR_GRPINFO_CACHES; i++) {
-		struct kmem_cache *cachep = ext4_groupinfo_caches[i];
-		if (cachep) {
-			char *name = (char *)kmem_cache_name(cachep);
-			kmem_cache_destroy(cachep);
-			kfree(name);
-		}
-	}
+	ext4_groupinfo_destroy_slabs();
 	ext4_remove_debugfs_entry();
 }
 
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 7270dcf..955cc30 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -32,14 +32,8 @@
 
 static struct kmem_cache *io_page_cachep, *io_end_cachep;
 
-#define WQ_HASH_SZ		37
-#define to_ioend_wq(v)	(&ioend_wq[((unsigned long)v) % WQ_HASH_SZ])
-static wait_queue_head_t ioend_wq[WQ_HASH_SZ];
-
 int __init ext4_init_pageio(void)
 {
-	int i;
-
 	io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
 	if (io_page_cachep == NULL)
 		return -ENOMEM;
@@ -48,9 +42,6 @@
 		kmem_cache_destroy(io_page_cachep);
 		return -ENOMEM;
 	}
-	for (i = 0; i < WQ_HASH_SZ; i++)
-		init_waitqueue_head(&ioend_wq[i]);
-
 	return 0;
 }
 
@@ -62,7 +53,7 @@
 
 void ext4_ioend_wait(struct inode *inode)
 {
-	wait_queue_head_t *wq = to_ioend_wq(inode);
+	wait_queue_head_t *wq = ext4_ioend_wq(inode);
 
 	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
 }
@@ -87,7 +78,7 @@
 	for (i = 0; i < io->num_io_pages; i++)
 		put_io_page(io->pages[i]);
 	io->num_io_pages = 0;
-	wq = to_ioend_wq(io->inode);
+	wq = ext4_ioend_wq(io->inode);
 	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) &&
 	    waitqueue_active(wq))
 		wake_up_all(wq);
@@ -102,6 +93,7 @@
 	struct inode *inode = io->inode;
 	loff_t offset = io->offset;
 	ssize_t size = io->size;
+	wait_queue_head_t *wq;
 	int ret = 0;
 
 	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
@@ -126,7 +118,16 @@
 	if (io->iocb)
 		aio_complete(io->iocb, io->result, 0);
 	/* clear the DIO AIO unwritten flag */
-	io->flag &= ~EXT4_IO_END_UNWRITTEN;
+	if (io->flag & EXT4_IO_END_UNWRITTEN) {
+		io->flag &= ~EXT4_IO_END_UNWRITTEN;
+		/* Wake up anyone waiting on unwritten extent conversion */
+		wq = ext4_ioend_wq(io->inode);
+		if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten) &&
+		    waitqueue_active(wq)) {
+			wake_up_all(wq);
+		}
+	}
+
 	return ret;
 }
 
@@ -190,6 +191,7 @@
 	struct inode *inode;
 	unsigned long flags;
 	int i;
+	sector_t bi_sector = bio->bi_sector;
 
 	BUG_ON(!io_end);
 	bio->bi_private = NULL;
@@ -207,9 +209,7 @@
 		if (error)
 			SetPageError(page);
 		BUG_ON(!head);
-		if (head->b_size == PAGE_CACHE_SIZE)
-			clear_buffer_dirty(head);
-		else {
+		if (head->b_size != PAGE_CACHE_SIZE) {
 			loff_t offset;
 			loff_t io_end_offset = io_end->offset + io_end->size;
 
@@ -221,7 +221,6 @@
 					if (error)
 						buffer_io_error(bh);
 
-					clear_buffer_dirty(bh);
 				}
 				if (buffer_delay(bh))
 					partial_write = 1;
@@ -257,7 +256,7 @@
 			     (unsigned long long) io_end->offset,
 			     (long) io_end->size,
 			     (unsigned long long)
-			     bio->bi_sector >> (inode->i_blkbits - 9));
+			     bi_sector >> (inode->i_blkbits - 9));
 	}
 
 	/* Add the io_end to per-inode completed io list*/
@@ -380,6 +379,7 @@
 
 	blocksize = 1 << inode->i_blkbits;
 
+	BUG_ON(!PageLocked(page));
 	BUG_ON(PageWriteback(page));
 	set_page_writeback(page);
 	ClearPageError(page);
@@ -397,12 +397,14 @@
 	for (bh = head = page_buffers(page), block_start = 0;
 	     bh != head || !block_start;
 	     block_start = block_end, bh = bh->b_this_page) {
+
 		block_end = block_start + blocksize;
 		if (block_start >= len) {
 			clear_buffer_dirty(bh);
 			set_buffer_uptodate(bh);
 			continue;
 		}
+		clear_buffer_dirty(bh);
 		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
 		if (ret) {
 			/*
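[Editor's note: the page-io.c change above pairs the new i_aiodio_unwritten counter with the shared wait queue: waiters block until the count drops to zero, and the completion path does atomic_dec_and_test() and wakes the queue only if someone is actually waiting. A pthread sketch of the same "wait for the counter to reach zero" handshake, simplified to one mutex/condvar instead of a hashed wait queue.]

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int inflight;               /* i_aiodio_unwritten analogue */

/* submission path: one more unwritten-extent conversion outstanding */
static void io_submitted(void)
{
	pthread_mutex_lock(&lock);
	inflight++;
	pthread_mutex_unlock(&lock);
}

/* completion path: drop the count and wake waiters when it hits zero */
static void io_completed(void)
{
	pthread_mutex_lock(&lock);
	if (--inflight == 0)
		pthread_cond_broadcast(&cond);   /* wake_up_all() analogue */
	pthread_mutex_unlock(&lock);
}

/* ext4_aiodio_wait() analogue: block until nothing is outstanding */
static void wait_for_all_conversions(void)
{
	pthread_mutex_lock(&lock);
	while (inflight != 0)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	io_submitted();
	io_completed();              /* would normally run from another thread */
	wait_for_all_conversions();
	printf("all conversions finished\n");
	return 0;
}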
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index cb10a06..f6a318f 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -77,6 +77,7 @@
 		       const char *dev_name, void *data);
 static void ext4_destroy_lazyinit_thread(void);
 static void ext4_unregister_li_request(struct super_block *sb);
+static void ext4_clear_request_list(void);
 
 #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
 static struct file_system_type ext3_fs_type = {
@@ -832,6 +833,7 @@
 	ei->i_sync_tid = 0;
 	ei->i_datasync_tid = 0;
 	atomic_set(&ei->i_ioend_count, 0);
+	atomic_set(&ei->i_aiodio_unwritten, 0);
 
 	return &ei->vfs_inode;
 }
@@ -1161,7 +1163,7 @@
 static int ext4_mark_dquot_dirty(struct dquot *dquot);
 static int ext4_write_info(struct super_block *sb, int type);
 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
-				char *path);
+			 struct path *path);
 static int ext4_quota_off(struct super_block *sb, int type);
 static int ext4_quota_on_mount(struct super_block *sb, int type);
 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
@@ -2716,6 +2718,8 @@
 	mutex_unlock(&ext4_li_info->li_list_mtx);
 }
 
+static struct task_struct *ext4_lazyinit_task;
+
 /*
  * This is the function where ext4lazyinit thread lives. It walks
  * through the request list searching for next scheduled filesystem.
@@ -2784,6 +2788,10 @@
 		if (time_before(jiffies, next_wakeup))
 			schedule();
 		finish_wait(&eli->li_wait_daemon, &wait);
+		if (kthread_should_stop()) {
+			ext4_clear_request_list();
+			goto exit_thread;
+		}
 	}
 
 exit_thread:
@@ -2808,6 +2816,7 @@
 	wake_up(&eli->li_wait_task);
 
 	kfree(ext4_li_info);
+	ext4_lazyinit_task = NULL;
 	ext4_li_info = NULL;
 	mutex_unlock(&ext4_li_mtx);
 
@@ -2830,11 +2839,10 @@
 
 static int ext4_run_lazyinit_thread(void)
 {
-	struct task_struct *t;
-
-	t = kthread_run(ext4_lazyinit_thread, ext4_li_info, "ext4lazyinit");
-	if (IS_ERR(t)) {
-		int err = PTR_ERR(t);
+	ext4_lazyinit_task = kthread_run(ext4_lazyinit_thread,
+					 ext4_li_info, "ext4lazyinit");
+	if (IS_ERR(ext4_lazyinit_task)) {
+		int err = PTR_ERR(ext4_lazyinit_task);
 		ext4_clear_request_list();
 		del_timer_sync(&ext4_li_info->li_timer);
 		kfree(ext4_li_info);
@@ -2985,16 +2993,10 @@
 	 * If thread exited earlier
 	 * there's nothing to be done.
 	 */
-	if (!ext4_li_info)
+	if (!ext4_li_info || !ext4_lazyinit_task)
 		return;
 
-	ext4_clear_request_list();
-
-	while (ext4_li_info->li_task) {
-		wake_up(&ext4_li_info->li_wait_daemon);
-		wait_event(ext4_li_info->li_wait_task,
-			   ext4_li_info->li_task == NULL);
-	}
+	kthread_stop(ext4_lazyinit_task);
 }
 
 static int ext4_fill_super(struct super_block *sb, void *data, int silent)
@@ -4558,27 +4560,20 @@
  * Standard function to be called on quota_on
  */
 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
-			 char *name)
+			 struct path *path)
 {
 	int err;
-	struct path path;
 
 	if (!test_opt(sb, QUOTA))
 		return -EINVAL;
 
-	err = kern_path(name, LOOKUP_FOLLOW, &path);
-	if (err)
-		return err;
-
 	/* Quotafile not on the same filesystem? */
-	if (path.mnt->mnt_sb != sb) {
-		path_put(&path);
+	if (path->mnt->mnt_sb != sb)
 		return -EXDEV;
-	}
 	/* Journaling quota? */
 	if (EXT4_SB(sb)->s_qf_names[type]) {
 		/* Quotafile not in fs root? */
-		if (path.dentry->d_parent != sb->s_root)
+		if (path->dentry->d_parent != sb->s_root)
 			ext4_msg(sb, KERN_WARNING,
 				"Quota file not on filesystem root. "
 				"Journaled quota will not work");
@@ -4589,7 +4584,7 @@
 	 * all updates to the file when we bypass pagecache...
 	 */
 	if (EXT4_SB(sb)->s_journal &&
-	    ext4_should_journal_data(path.dentry->d_inode)) {
+	    ext4_should_journal_data(path->dentry->d_inode)) {
 		/*
 		 * We don't need to lock updates but journal_flush() could
 		 * otherwise be livelocked...
@@ -4597,15 +4592,11 @@
 		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
 		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
 		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
-		if (err) {
-			path_put(&path);
+		if (err)
 			return err;
-		}
 	}
 
-	err = dquot_quota_on_path(sb, type, format_id, &path);
-	path_put(&path);
-	return err;
+	return dquot_quota_on(sb, type, format_id, path);
 }
 
 static int ext4_quota_off(struct super_block *sb, int type)
@@ -4779,7 +4770,7 @@
 	.fs_flags	= FS_REQUIRES_DEV,
 };
 
-int __init ext4_init_feat_adverts(void)
+static int __init ext4_init_feat_adverts(void)
 {
 	struct ext4_features *ef;
 	int ret = -ENOMEM;
@@ -4803,23 +4794,44 @@
 	return ret;
 }
 
+static void ext4_exit_feat_adverts(void)
+{
+	kobject_put(&ext4_feat->f_kobj);
+	wait_for_completion(&ext4_feat->f_kobj_unregister);
+	kfree(ext4_feat);
+}
+
+/* Shared across all ext4 file systems */
+wait_queue_head_t ext4__ioend_wq[EXT4_WQ_HASH_SZ];
+struct mutex ext4__aio_mutex[EXT4_WQ_HASH_SZ];
+
 static int __init ext4_init_fs(void)
 {
-	int err;
+	int i, err;
 
 	ext4_check_flag_values();
+
+	for (i = 0; i < EXT4_WQ_HASH_SZ; i++) {
+		mutex_init(&ext4__aio_mutex[i]);
+		init_waitqueue_head(&ext4__ioend_wq[i]);
+	}
+
 	err = ext4_init_pageio();
 	if (err)
 		return err;
 	err = ext4_init_system_zone();
 	if (err)
-		goto out5;
+		goto out7;
 	ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj);
 	if (!ext4_kset)
-		goto out4;
+		goto out6;
 	ext4_proc_root = proc_mkdir("fs/ext4", NULL);
+	if (!ext4_proc_root)
+		goto out5;
 
 	err = ext4_init_feat_adverts();
+	if (err)
+		goto out4;
 
 	err = ext4_init_mballoc();
 	if (err)
@@ -4849,12 +4861,14 @@
 out2:
 	ext4_exit_mballoc();
 out3:
-	kfree(ext4_feat);
-	remove_proc_entry("fs/ext4", NULL);
-	kset_unregister(ext4_kset);
+	ext4_exit_feat_adverts();
 out4:
-	ext4_exit_system_zone();
+	remove_proc_entry("fs/ext4", NULL);
 out5:
+	kset_unregister(ext4_kset);
+out6:
+	ext4_exit_system_zone();
+out7:
 	ext4_exit_pageio();
 	return err;
 }
@@ -4868,6 +4882,7 @@
 	destroy_inodecache();
 	ext4_exit_xattr();
 	ext4_exit_mballoc();
+	ext4_exit_feat_adverts();
 	remove_proc_entry("fs/ext4", NULL);
 	kset_unregister(ext4_kset);
 	ext4_exit_system_zone();
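[Editor's note: ext4_init_fs() above grows two more unwind labels (out4..out7) because each newly added initialization step must be undone in reverse order when a later one fails. A compact userspace sketch of that goto-ladder error-handling convention; the step names are made up.]

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }   /* pretend this one fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

/* each failure jumps to the label that tears down everything that
 * already succeeded, in reverse order of setup */
static int init_all(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;
	err = step_b();
	if (err)
		goto out_a;
	err = step_c();
	if (err)
		goto out_b;
	return 0;

out_b:
	undo_b();
out_a:
	undo_a();
out:
	return err;
}

int main(void)
{
	printf("init_all() = %d\n", init_all());
	return 0;
}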
diff --git a/fs/fcntl.c b/fs/fcntl.c
index ecc8b39..cb10261 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -815,7 +815,7 @@
 		__O_SYNC	| O_DSYNC	| FASYNC	|
 		O_DIRECT	| O_LARGEFILE	| O_DIRECTORY	|
 		O_NOFOLLOW	| O_NOATIME	| O_CLOEXEC	|
-		FMODE_EXEC
+		__FMODE_EXEC
 		));
 
 	fasync_cache = kmem_cache_create("fasync_cache",
diff --git a/fs/file_table.c b/fs/file_table.c
index c3e89ad..eb36b6b 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -125,13 +125,13 @@
 		goto fail;
 
 	percpu_counter_inc(&nr_files);
+	f->f_cred = get_cred(cred);
 	if (security_file_alloc(f))
 		goto fail_sec;
 
 	INIT_LIST_HEAD(&f->f_u.fu_list);
 	atomic_long_set(&f->f_count, 1);
 	rwlock_init(&f->f_owner.lock);
-	f->f_cred = get_cred(cred);
 	spin_lock_init(&f->f_lock);
 	eventpoll_init_file(f);
 	/* f->f_version: 0 */
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 08a8beb..7cd9a5a 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1779,11 +1779,11 @@
 #endif
 
 	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
-					  WQ_HIGHPRI | WQ_FREEZEABLE, 0);
+					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
 	if (IS_ERR(glock_workqueue))
 		return PTR_ERR(glock_workqueue);
 	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
-						WQ_MEM_RECLAIM | WQ_FREEZEABLE,
+						WQ_MEM_RECLAIM | WQ_FREEZABLE,
 						0);
 	if (IS_ERR(gfs2_delete_workqueue)) {
 		destroy_workqueue(glock_workqueue);
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 2232b3c..7aa7d4f 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -74,16 +74,14 @@
 }
 
 /**
- * GFS2 lookup code fills in vfs inode contents based on info obtained
- * from directory entry inside gfs2_inode_lookup(). This has caused issues
- * with NFS code path since its get_dentry routine doesn't have the relevant
- * directory entry when gfs2_inode_lookup() is invoked. Part of the code
- * segment inside gfs2_inode_lookup code needs to get moved around.
+ * gfs2_set_iop - Sets inode operations
+ * @inode: The inode with correct i_mode filled in
  *
- * Clears I_NEW as well.
- **/
+ * GFS2 lookup code fills in vfs inode contents based on info obtained
+ * from directory entry inside gfs2_inode_lookup().
+ */
 
-void gfs2_set_iop(struct inode *inode)
+static void gfs2_set_iop(struct inode *inode)
 {
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	umode_t mode = inode->i_mode;
@@ -106,8 +104,6 @@
 		inode->i_op = &gfs2_file_iops;
 		init_special_inode(inode, inode->i_mode, inode->i_rdev);
 	}
-
-	unlock_new_inode(inode);
 }
 
 /**
@@ -119,10 +115,8 @@
  * Returns: A VFS inode, or an error
  */
 
-struct inode *gfs2_inode_lookup(struct super_block *sb,
-				unsigned int type,
-				u64 no_addr,
-				u64 no_formal_ino)
+struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
+				u64 no_addr, u64 no_formal_ino)
 {
 	struct inode *inode;
 	struct gfs2_inode *ip;
@@ -152,51 +146,37 @@
 		error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
 		if (unlikely(error))
 			goto fail_iopen;
-		ip->i_iopen_gh.gh_gl->gl_object = ip;
 
+		ip->i_iopen_gh.gh_gl->gl_object = ip;
 		gfs2_glock_put(io_gl);
 		io_gl = NULL;
 
-		if ((type == DT_UNKNOWN) && (no_formal_ino == 0))
-			goto gfs2_nfsbypass;
-
-		inode->i_mode = DT2IF(type);
-
-		/*
-		 * We must read the inode in order to work out its type in
-		 * this case. Note that this doesn't happen often as we normally
-		 * know the type beforehand. This code path only occurs during
-		 * unlinked inode recovery (where it is safe to do this glock,
-		 * which is not true in the general case).
-		 */
 		if (type == DT_UNKNOWN) {
-			struct gfs2_holder gh;
-			error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
-			if (unlikely(error))
-				goto fail_glock;
-			/* Inode is now uptodate */
-			gfs2_glock_dq_uninit(&gh);
+			/* Inode glock must be locked already */
+			error = gfs2_inode_refresh(GFS2_I(inode));
+			if (error)
+				goto fail_refresh;
+		} else {
+			inode->i_mode = DT2IF(type);
 		}
 
 		gfs2_set_iop(inode);
+		unlock_new_inode(inode);
 	}
 
-gfs2_nfsbypass:
 	return inode;
-fail_glock:
-	gfs2_glock_dq(&ip->i_iopen_gh);
+
+fail_refresh:
+	ip->i_iopen_gh.gh_gl->gl_object = NULL;
+	gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_iopen:
 	if (io_gl)
 		gfs2_glock_put(io_gl);
 fail_put:
-	if (inode->i_state & I_NEW)
-		ip->i_gl->gl_object = NULL;
+	ip->i_gl->gl_object = NULL;
 	gfs2_glock_put(ip->i_gl);
 fail:
-	if (inode->i_state & I_NEW)
-		iget_failed(inode);
-	else
-		iput(inode);
+	iget_failed(inode);
 	return ERR_PTR(error);
 }
 
@@ -221,14 +201,6 @@
 	if (IS_ERR(inode))
 		goto fail;
 
-	error = gfs2_inode_refresh(GFS2_I(inode));
-	if (error)
-		goto fail_iput;
-
-	/* Pick up the works we bypass in gfs2_inode_lookup */
-	if (inode->i_state & I_NEW) 
-		gfs2_set_iop(inode);
-
 	/* Two extra checks for NFS only */
 	if (no_formal_ino) {
 		error = -ESTALE;
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 732a183..3e00a66 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -96,7 +96,6 @@
 	return -EIO;
 }
 
-extern void gfs2_set_iop(struct inode *inode);
 extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, 
 				       u64 no_addr, u64 no_formal_ino);
 extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index ebef7ab..85ba027 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -144,7 +144,7 @@
 
 	error = -ENOMEM;
 	gfs_recovery_wq = alloc_workqueue("gfs_recovery",
-					  WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0);
+					  WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
 	if (!gfs_recovery_wq)
 		goto fail_wq;
 
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 16c2eca..ec73ed7 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1336,6 +1336,7 @@
 	if (error)
 		goto out_truncate;
 
+	ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
 	gfs2_glock_dq_wait(&ip->i_iopen_gh);
 	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
 	error = gfs2_glock_nq(&ip->i_iopen_gh);
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index 52a0bca..b1991a2 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -397,8 +397,8 @@
 	u32 start, len, goal;
 	int res;
 
-	if (sbi->total_blocks - sbi->free_blocks + 8 >
-			sbi->alloc_file->i_size * 8) {
+	if (sbi->alloc_file->i_size * 8 <
+	    sbi->total_blocks - sbi->free_blocks + 8) {
 		/* extend alloc file */
 		printk(KERN_ERR "hfs: extend alloc file! "
 				"(%llu,%u,%u)\n",
diff --git a/fs/hfsplus/part_tbl.c b/fs/hfsplus/part_tbl.c
index d66ad11..40ad88c 100644
--- a/fs/hfsplus/part_tbl.c
+++ b/fs/hfsplus/part_tbl.c
@@ -134,7 +134,7 @@
 	res = hfsplus_submit_bio(sb->s_bdev, *part_start + HFS_PMAP_BLK,
 				 data, READ);
 	if (res)
-		return res;
+		goto out;
 
 	switch (be16_to_cpu(*((__be16 *)data))) {
 	case HFS_OLD_PMAP_MAGIC:
@@ -147,7 +147,7 @@
 		res = -ENOENT;
 		break;
 	}
-
+out:
 	kfree(data);
 	return res;
 }
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 9a3b479..b49b555 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -338,20 +338,22 @@
 	struct inode *root, *inode;
 	struct qstr str;
 	struct nls_table *nls = NULL;
-	int err = -EINVAL;
+	int err;
 
+	err = -ENOMEM;
 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
 	if (!sbi)
-		return -ENOMEM;
+		goto out;
 
 	sb->s_fs_info = sbi;
 	mutex_init(&sbi->alloc_mutex);
 	mutex_init(&sbi->vh_mutex);
 	hfsplus_fill_defaults(sbi);
+
+	err = -EINVAL;
 	if (!hfsplus_parse_options(data, sbi)) {
 		printk(KERN_ERR "hfs: unable to parse mount options\n");
-		err = -EINVAL;
-		goto cleanup;
+		goto out_unload_nls;
 	}
 
 	/* temporarily use utf8 to correctly find the hidden dir below */
@@ -359,16 +361,14 @@
 	sbi->nls = load_nls("utf8");
 	if (!sbi->nls) {
 		printk(KERN_ERR "hfs: unable to load nls for utf8\n");
-		err = -EINVAL;
-		goto cleanup;
+		goto out_unload_nls;
 	}
 
 	/* Grab the volume header */
 	if (hfsplus_read_wrapper(sb)) {
 		if (!silent)
 			printk(KERN_WARNING "hfs: unable to find HFS+ superblock\n");
-		err = -EINVAL;
-		goto cleanup;
+		goto out_unload_nls;
 	}
 	vhdr = sbi->s_vhdr;
 
@@ -377,7 +377,7 @@
 	if (be16_to_cpu(vhdr->version) < HFSPLUS_MIN_VERSION ||
 	    be16_to_cpu(vhdr->version) > HFSPLUS_CURRENT_VERSION) {
 		printk(KERN_ERR "hfs: wrong filesystem version\n");
-		goto cleanup;
+		goto out_free_vhdr;
 	}
 	sbi->total_blocks = be32_to_cpu(vhdr->total_blocks);
 	sbi->free_blocks = be32_to_cpu(vhdr->free_blocks);
@@ -421,19 +421,19 @@
 	sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
 	if (!sbi->ext_tree) {
 		printk(KERN_ERR "hfs: failed to load extents file\n");
-		goto cleanup;
+		goto out_free_vhdr;
 	}
 	sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
 	if (!sbi->cat_tree) {
 		printk(KERN_ERR "hfs: failed to load catalog file\n");
-		goto cleanup;
+		goto out_close_ext_tree;
 	}
 
 	inode = hfsplus_iget(sb, HFSPLUS_ALLOC_CNID);
 	if (IS_ERR(inode)) {
 		printk(KERN_ERR "hfs: failed to load allocation file\n");
 		err = PTR_ERR(inode);
-		goto cleanup;
+		goto out_close_cat_tree;
 	}
 	sbi->alloc_file = inode;
 
@@ -442,14 +442,7 @@
 	if (IS_ERR(root)) {
 		printk(KERN_ERR "hfs: failed to load root directory\n");
 		err = PTR_ERR(root);
-		goto cleanup;
-	}
-	sb->s_d_op = &hfsplus_dentry_operations;
-	sb->s_root = d_alloc_root(root);
-	if (!sb->s_root) {
-		iput(root);
-		err = -ENOMEM;
-		goto cleanup;
+		goto out_put_alloc_file;
 	}
 
 	str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
@@ -459,46 +452,69 @@
 	if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
 		hfs_find_exit(&fd);
 		if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
-			goto cleanup;
+			goto out_put_root;
 		inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
 		if (IS_ERR(inode)) {
 			err = PTR_ERR(inode);
-			goto cleanup;
+			goto out_put_root;
 		}
 		sbi->hidden_dir = inode;
 	} else
 		hfs_find_exit(&fd);
 
-	if (sb->s_flags & MS_RDONLY)
-		goto out;
+	if (!(sb->s_flags & MS_RDONLY)) {
+		/*
+		 * H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
+		 * all three are registered with Apple for our use
+		 */
+		vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
+		vhdr->modify_date = hfsp_now2mt();
+		be32_add_cpu(&vhdr->write_count, 1);
+		vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
+		vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
+		hfsplus_sync_fs(sb, 1);
 
-	/* H+LX == hfsplusutils, H+Lx == this driver, H+lx is unused
-	 * all three are registered with Apple for our use
-	 */
-	vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
-	vhdr->modify_date = hfsp_now2mt();
-	be32_add_cpu(&vhdr->write_count, 1);
-	vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
-	vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
-	hfsplus_sync_fs(sb, 1);
+		if (!sbi->hidden_dir) {
+			mutex_lock(&sbi->vh_mutex);
+			sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
+			hfsplus_create_cat(sbi->hidden_dir->i_ino, root, &str,
+					   sbi->hidden_dir);
+			mutex_unlock(&sbi->vh_mutex);
 
-	if (!sbi->hidden_dir) {
-		mutex_lock(&sbi->vh_mutex);
-		sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
-		hfsplus_create_cat(sbi->hidden_dir->i_ino, sb->s_root->d_inode,
-				   &str, sbi->hidden_dir);
-		mutex_unlock(&sbi->vh_mutex);
-
-		hfsplus_mark_inode_dirty(sbi->hidden_dir, HFSPLUS_I_CAT_DIRTY);
+			hfsplus_mark_inode_dirty(sbi->hidden_dir,
+						 HFSPLUS_I_CAT_DIRTY);
+		}
 	}
-out:
+
+	sb->s_d_op = &hfsplus_dentry_operations;
+	sb->s_root = d_alloc_root(root);
+	if (!sb->s_root) {
+		err = -ENOMEM;
+		goto out_put_hidden_dir;
+	}
+
 	unload_nls(sbi->nls);
 	sbi->nls = nls;
 	return 0;
 
-cleanup:
-	hfsplus_put_super(sb);
+out_put_hidden_dir:
+	iput(sbi->hidden_dir);
+out_put_root:
+	iput(root);
+out_put_alloc_file:
+	iput(sbi->alloc_file);
+out_close_cat_tree:
+	hfs_btree_close(sbi->cat_tree);
+out_close_ext_tree:
+	hfs_btree_close(sbi->ext_tree);
+out_free_vhdr:
+	kfree(sbi->s_vhdr);
+	kfree(sbi->s_backup_vhdr);
+out_unload_nls:
+	unload_nls(sbi->nls);
 	unload_nls(nls);
+	kfree(sbi);
+out:
 	return err;
 }
 
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 1962317..3031d81 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -167,7 +167,7 @@
 		break;
 	case cpu_to_be16(HFSP_WRAP_MAGIC):
 		if (!hfsplus_read_mdb(sbi->s_vhdr, &wd))
-			goto out;
+			goto out_free_backup_vhdr;
 		wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT;
 		part_start += wd.ablk_start + wd.embed_start * wd.ablk_size;
 		part_size = wd.embed_count * wd.ablk_size;
@@ -179,7 +179,7 @@
 		 * (should do this only for cdrom/loop though)
 		 */
 		if (hfs_part_find(sb, &part_start, &part_size))
-			goto out;
+			goto out_free_backup_vhdr;
 		goto reread;
 	}
 
diff --git a/fs/ioctl.c b/fs/ioctl.c
index a59635e..1eebeb7 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -273,6 +273,13 @@
 		len = isize;
 	}
 
+	/*
+	 * Some filesystems can't deal with being asked to map less than
+	 * blocksize, so make sure our len is at least block length.
+	 */
+	if (logical_to_blk(inode, len) == 0)
+		len = blk_to_logical(inode, 1);
+
 	start_blk = logical_to_blk(inode, start);
 	last_blk = logical_to_blk(inode, start + len - 1);
 
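
The fiemap hunk above rounds the requested length up so the mapping request always covers at least one block before it is converted to block numbers; the conversion helpers are just shifts by the block-size bits. A small stand-alone illustration of that clamp, assuming a 4096-byte block size (the helpers here are hypothetical, not the kernel ones):

	#include <stdint.h>
	#include <stdio.h>

	#define BLKBITS 12	/* assume a 4096-byte block size */

	static uint64_t logical_to_blk(uint64_t off) { return off >> BLKBITS; }
	static uint64_t blk_to_logical(uint64_t blk) { return blk << BLKBITS; }

	int main(void)
	{
		uint64_t start = 100, len = 512;	/* request smaller than one block */

		/* some filesystems cannot map less than a block, so round up */
		if (logical_to_blk(len) == 0)
			len = blk_to_logical(1);

		printf("start_blk=%llu last_blk=%llu\n",
		       (unsigned long long)logical_to_blk(start),
		       (unsigned long long)logical_to_blk(start + len - 1));
		return 0;
	}
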
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 9e46869..97e7346 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -473,7 +473,8 @@
 }
 
 /*
- * Called under j_state_lock.  Returns true if a transaction commit was started.
+ * Called with j_state_lock locked for writing.
+ * Returns true if a transaction commit was started.
  */
 int __jbd2_log_start_commit(journal_t *journal, tid_t target)
 {
@@ -520,11 +521,13 @@
 {
 	transaction_t *transaction = NULL;
 	tid_t tid;
+	int need_to_start = 0;
 
 	read_lock(&journal->j_state_lock);
 	if (journal->j_running_transaction && !current->journal_info) {
 		transaction = journal->j_running_transaction;
-		__jbd2_log_start_commit(journal, transaction->t_tid);
+		if (!tid_geq(journal->j_commit_request, transaction->t_tid))
+			need_to_start = 1;
 	} else if (journal->j_committing_transaction)
 		transaction = journal->j_committing_transaction;
 
@@ -535,6 +538,8 @@
 
 	tid = transaction->t_tid;
 	read_unlock(&journal->j_state_lock);
+	if (need_to_start)
+		jbd2_log_start_commit(journal, tid);
 	jbd2_log_wait_commit(journal, tid);
 	return 1;
 }
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index faad2bd..1d11910 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -117,10 +117,10 @@
 static int start_this_handle(journal_t *journal, handle_t *handle,
 			     int gfp_mask)
 {
-	transaction_t *transaction;
-	int needed;
-	int nblocks = handle->h_buffer_credits;
-	transaction_t *new_transaction = NULL;
+	transaction_t	*transaction, *new_transaction = NULL;
+	tid_t		tid;
+	int		needed, need_to_start;
+	int		nblocks = handle->h_buffer_credits;
 
 	if (nblocks > journal->j_max_transaction_buffers) {
 		printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
@@ -222,8 +222,11 @@
 		atomic_sub(nblocks, &transaction->t_outstanding_credits);
 		prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
 				TASK_UNINTERRUPTIBLE);
-		__jbd2_log_start_commit(journal, transaction->t_tid);
+		tid = transaction->t_tid;
+		need_to_start = !tid_geq(journal->j_commit_request, tid);
 		read_unlock(&journal->j_state_lock);
+		if (need_to_start)
+			jbd2_log_start_commit(journal, tid);
 		schedule();
 		finish_wait(&journal->j_wait_transaction_locked, &wait);
 		goto repeat;
@@ -442,7 +445,8 @@
 {
 	transaction_t *transaction = handle->h_transaction;
 	journal_t *journal = transaction->t_journal;
-	int ret;
+	tid_t		tid;
+	int		need_to_start, ret;
 
 	/* If we've had an abort of any type, don't even think about
 	 * actually doing the restart! */
@@ -465,8 +469,11 @@
 	spin_unlock(&transaction->t_handle_lock);
 
 	jbd_debug(2, "restarting handle %p\n", handle);
-	__jbd2_log_start_commit(journal, transaction->t_tid);
+	tid = transaction->t_tid;
+	need_to_start = !tid_geq(journal->j_commit_request, tid);
 	read_unlock(&journal->j_state_lock);
+	if (need_to_start)
+		jbd2_log_start_commit(journal, tid);
 
 	lock_map_release(&handle->h_lockdep_map);
 	handle->h_buffer_credits = nblocks;
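
The jbd2 hunks above stop calling __jbd2_log_start_commit() while j_state_lock is held for reading; instead they record the target tid and whether a commit is needed under the read lock, drop it, and only then call jbd2_log_start_commit(), which takes the lock for writing. A rough userspace analogue of that ordering, sketched with pthreads and illustrative names:

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_rwlock_t state_lock = PTHREAD_RWLOCK_INITIALIZER;
	static unsigned long commit_request;	/* highest tid a commit was requested for */
	static unsigned long running_tid = 1;

	/* takes state_lock for writing, so must not be called with the read lock held */
	static void start_commit(unsigned long target)
	{
		pthread_rwlock_wrlock(&state_lock);
		if (commit_request < target)
			commit_request = target;
		pthread_rwlock_unlock(&state_lock);
	}

	static void maybe_start_commit(void)
	{
		unsigned long tid;
		bool need_to_start;

		/* decide under the read lock, act only after dropping it */
		pthread_rwlock_rdlock(&state_lock);
		tid = running_tid;
		need_to_start = commit_request < tid;
		pthread_rwlock_unlock(&state_lock);

		if (need_to_start)
			start_commit(tid);
	}

	int main(void)
	{
		maybe_start_commit();
		return 0;
	}
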
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 5f1bcb2..b7c99bf 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -520,7 +520,7 @@
 					struct nsm_handle *nsm,
 					const struct nlm_reboot *info)
 {
-	struct nlm_host *host = NULL;
+	struct nlm_host *host;
 	struct hlist_head *chain;
 	struct hlist_node *pos;
 
@@ -532,12 +532,13 @@
 			host->h_state++;
 
 			nlm_get_host(host);
-			goto out;
+			mutex_unlock(&nlm_host_mutex);
+			return host;
 		}
 	}
-out:
+
 	mutex_unlock(&nlm_host_mutex);
-	return host;
+	return NULL;
 }
 
 /**
diff --git a/fs/namei.c b/fs/namei.c
index 7d77f24..0087cf9 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -455,14 +455,6 @@
 	struct fs_struct *fs = current->fs;
 	struct dentry *parent = nd->path.dentry;
 
-	/*
-	 * It can be possible to revalidate the dentry that we started
-	 * the path walk with. force_reval_path may also revalidate the
-	 * dentry already committed to the nameidata.
-	 */
-	if (unlikely(parent == dentry))
-		return nameidata_drop_rcu(nd);
-
 	BUG_ON(!(nd->flags & LOOKUP_RCU));
 	if (nd->root.mnt) {
 		spin_lock(&fs->lock);
@@ -561,39 +553,25 @@
  */
 void release_open_intent(struct nameidata *nd)
 {
-	if (nd->intent.open.file->f_path.dentry == NULL)
-		put_filp(nd->intent.open.file);
-	else
-		fput(nd->intent.open.file);
-}
+	struct file *file = nd->intent.open.file;
 
-/*
- * Call d_revalidate and handle filesystems that request rcu-walk
- * to be dropped. This may be called and return in rcu-walk mode,
- * regardless of success or error. If -ECHILD is returned, the caller
- * must return -ECHILD back up the path walk stack so path walk may
- * be restarted in ref-walk mode.
- */
-static int d_revalidate(struct dentry *dentry, struct nameidata *nd)
-{
-	int status;
-
-	status = dentry->d_op->d_revalidate(dentry, nd);
-	if (status == -ECHILD) {
-		if (nameidata_dentry_drop_rcu(nd, dentry))
-			return status;
-		status = dentry->d_op->d_revalidate(dentry, nd);
+	if (file && !IS_ERR(file)) {
+		if (file->f_path.dentry == NULL)
+			put_filp(file);
+		else
+			fput(file);
 	}
-
-	return status;
 }
 
-static inline struct dentry *
+static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+	return dentry->d_op->d_revalidate(dentry, nd);
+}
+
+static struct dentry *
 do_revalidate(struct dentry *dentry, struct nameidata *nd)
 {
-	int status;
-
-	status = d_revalidate(dentry, nd);
+	int status = d_revalidate(dentry, nd);
 	if (unlikely(status <= 0)) {
 		/*
 		 * The dentry failed validation.
@@ -602,24 +580,39 @@
 		 * to return a fail status.
 		 */
 		if (status < 0) {
-			/* If we're in rcu-walk, we don't have a ref */
-			if (!(nd->flags & LOOKUP_RCU))
-				dput(dentry);
+			dput(dentry);
 			dentry = ERR_PTR(status);
-
-		} else {
-			/* Don't d_invalidate in rcu-walk mode */
-			if (nameidata_dentry_drop_rcu_maybe(nd, dentry))
-				return ERR_PTR(-ECHILD);
-			if (!d_invalidate(dentry)) {
-				dput(dentry);
-				dentry = NULL;
-			}
+		} else if (!d_invalidate(dentry)) {
+			dput(dentry);
+			dentry = NULL;
 		}
 	}
 	return dentry;
 }
 
+static inline struct dentry *
+do_revalidate_rcu(struct dentry *dentry, struct nameidata *nd)
+{
+	int status = d_revalidate(dentry, nd);
+	if (likely(status > 0))
+		return dentry;
+	if (status == -ECHILD) {
+		if (nameidata_dentry_drop_rcu(nd, dentry))
+			return ERR_PTR(-ECHILD);
+		return do_revalidate(dentry, nd);
+	}
+	if (status < 0)
+		return ERR_PTR(status);
+	/* Don't d_invalidate in rcu-walk mode */
+	if (nameidata_dentry_drop_rcu(nd, dentry))
+		return ERR_PTR(-ECHILD);
+	if (!d_invalidate(dentry)) {
+		dput(dentry);
+		dentry = NULL;
+	}
+	return dentry;
+}
+
 static inline int need_reval_dot(struct dentry *dentry)
 {
 	if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE)))
@@ -664,9 +657,6 @@
 		return 0;
 
 	if (!status) {
-		/* Don't d_invalidate in rcu-walk mode */
-		if (nameidata_drop_rcu(nd))
-			return -ECHILD;
 		d_invalidate(dentry);
 		status = -ESTALE;
 	}
@@ -773,6 +763,8 @@
 	int error;
 	struct dentry *dentry = link->dentry;
 
+	BUG_ON(nd->flags & LOOKUP_RCU);
+
 	touch_atime(link->mnt, dentry);
 	nd_set_link(nd, NULL);
 
@@ -803,10 +795,16 @@
  * Without that kind of total limit, nasty chains of consecutive
  * symlinks can cause almost arbitrarily long lookups. 
  */
-static inline int do_follow_link(struct path *path, struct nameidata *nd)
+static inline int do_follow_link(struct inode *inode, struct path *path, struct nameidata *nd)
 {
 	void *cookie;
 	int err = -ELOOP;
+
+	/* We drop rcu-walk here */
+	if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry))
+		return -ECHILD;
+	BUG_ON(inode != path->dentry->d_inode);
+
 	if (current->link_count >= MAX_NESTED_LINKS)
 		goto loop;
 	if (current->total_link_count >= 40)
@@ -1251,9 +1249,15 @@
 			return -ECHILD;
 
 		nd->seq = seq;
-		if (dentry->d_flags & DCACHE_OP_REVALIDATE)
-			goto need_revalidate;
-done2:
+		if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
+			dentry = do_revalidate_rcu(dentry, nd);
+			if (!dentry)
+				goto need_lookup;
+			if (IS_ERR(dentry))
+				goto fail;
+			if (!(nd->flags & LOOKUP_RCU))
+				goto done;
+		}
 		path->mnt = mnt;
 		path->dentry = dentry;
 		if (likely(__follow_mount_rcu(nd, path, inode, false)))
@@ -1266,8 +1270,13 @@
 	if (!dentry)
 		goto need_lookup;
 found:
-	if (dentry->d_flags & DCACHE_OP_REVALIDATE)
-		goto need_revalidate;
+	if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
+		dentry = do_revalidate(dentry, nd);
+		if (!dentry)
+			goto need_lookup;
+		if (IS_ERR(dentry))
+			goto fail;
+	}
 done:
 	path->mnt = mnt;
 	path->dentry = dentry;
@@ -1309,16 +1318,6 @@
 	mutex_unlock(&dir->i_mutex);
 	goto found;
 
-need_revalidate:
-	dentry = do_revalidate(dentry, nd);
-	if (!dentry)
-		goto need_lookup;
-	if (IS_ERR(dentry))
-		goto fail;
-	if (nd->flags & LOOKUP_RCU)
-		goto done2;
-	goto done;
-
 fail:
 	return PTR_ERR(dentry);
 }
@@ -1415,11 +1414,7 @@
 			goto out_dput;
 
 		if (inode->i_op->follow_link) {
-			/* We commonly drop rcu-walk here */
-			if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry))
-				return -ECHILD;
-			BUG_ON(inode != next.dentry->d_inode);
-			err = do_follow_link(&next, nd);
+			err = do_follow_link(inode, &next, nd);
 			if (err)
 				goto return_err;
 			nd->inode = nd->path.dentry->d_inode;
@@ -1463,10 +1458,7 @@
 			break;
 		if (inode && unlikely(inode->i_op->follow_link) &&
 		    (lookup_flags & LOOKUP_FOLLOW)) {
-			if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry))
-				return -ECHILD;
-			BUG_ON(inode != next.dentry->d_inode);
-			err = do_follow_link(&next, nd);
+			err = do_follow_link(inode, &next, nd);
 			if (err)
 				goto return_err;
 			nd->inode = nd->path.dentry->d_inode;
@@ -1500,12 +1492,15 @@
 		 * We may need to check the cached dentry for staleness.
 		 */
 		if (need_reval_dot(nd->path.dentry)) {
+			if (nameidata_drop_rcu_last_maybe(nd))
+				return -ECHILD;
 			/* Note: we do not d_invalidate() */
 			err = d_revalidate(nd->path.dentry, nd);
 			if (!err)
 				err = -ESTALE;
 			if (err < 0)
 				break;
+			return 0;
 		}
 return_base:
 		if (nameidata_drop_rcu_last_maybe(nd))
@@ -2265,8 +2260,6 @@
 	return filp;
 
 exit:
-	if (!IS_ERR(nd->intent.open.file))
-		release_open_intent(nd);
 	path_put(&nd->path);
 	return ERR_PTR(error);
 }
@@ -2389,8 +2382,6 @@
 exit_dput:
 	path_put_conditional(path, nd);
 exit:
-	if (!IS_ERR(nd->intent.open.file))
-		release_open_intent(nd);
 	path_put(&nd->path);
 	return ERR_PTR(error);
 }
@@ -2477,6 +2468,7 @@
 	}
 	audit_inode(pathname, nd.path.dentry);
 	filp = finish_open(&nd, open_flag, acc_mode);
+	release_open_intent(&nd);
 	return filp;
 
 creat:
@@ -2553,6 +2545,7 @@
 		path_put(&nd.root);
 	if (filp == ERR_PTR(-ESTALE) && !(flags & LOOKUP_REVAL))
 		goto reval;
+	release_open_intent(&nd);
 	return filp;
 
 exit_dput:
@@ -2560,8 +2553,6 @@
 out_path:
 	path_put(&nd.path);
 out_filp:
-	if (!IS_ERR(nd.intent.open.file))
-		release_open_intent(&nd);
 	filp = ERR_PTR(error);
 	goto out;
 }
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 1990165..e3d2942 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -135,33 +135,6 @@
 
 #if defined(CONFIG_NFS_V4_1)
 /*
- *  * CB_SEQUENCE operations will fail until the callback sessionid is set.
- *   */
-int nfs4_set_callback_sessionid(struct nfs_client *clp)
-{
-	struct svc_serv *serv = clp->cl_rpcclient->cl_xprt->bc_serv;
-	struct nfs4_sessionid *bc_sid;
-
-	if (!serv->sv_bc_xprt)
-		return -EINVAL;
-
-	/* on success freed in xprt_free */
-	bc_sid = kmalloc(sizeof(struct nfs4_sessionid), GFP_KERNEL);
-	if (!bc_sid)
-		return -ENOMEM;
-	memcpy(bc_sid->data, &clp->cl_session->sess_id.data,
-		NFS4_MAX_SESSIONID_LEN);
-	spin_lock_bh(&serv->sv_cb_lock);
-	serv->sv_bc_xprt->xpt_bc_sid = bc_sid;
-	spin_unlock_bh(&serv->sv_cb_lock);
-	dprintk("%s set xpt_bc_sid=%u:%u:%u:%u for sv_bc_xprt %p\n", __func__,
-		((u32 *)bc_sid->data)[0], ((u32 *)bc_sid->data)[1],
-		((u32 *)bc_sid->data)[2], ((u32 *)bc_sid->data)[3],
-		serv->sv_bc_xprt);
-	return 0;
-}
-
-/*
  * The callback service for NFSv4.1 callbacks
  */
 static int
@@ -266,10 +239,6 @@
 		struct nfs_callback_data *cb_info)
 {
 }
-int nfs4_set_callback_sessionid(struct nfs_client *clp)
-{
-	return 0;
-}
 #endif /* CONFIG_NFS_V4_1 */
 
 /*
@@ -359,78 +328,58 @@
 	mutex_unlock(&nfs_callback_mutex);
 }
 
-static int check_gss_callback_principal(struct nfs_client *clp,
-					struct svc_rqst *rqstp)
+/* Boolean check of RPC_AUTH_GSS principal */
+int
+check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp)
 {
 	struct rpc_clnt *r = clp->cl_rpcclient;
 	char *p = svc_gss_principal(rqstp);
 
+	if (rqstp->rq_authop->flavour != RPC_AUTH_GSS)
+		return 1;
+
 	/* No RPC_AUTH_GSS on NFSv4.1 back channel yet */
 	if (clp->cl_minorversion != 0)
-		return SVC_DROP;
+		return 0;
 	/*
 	 * It might just be a normal user principal, in which case
 	 * userspace won't bother to tell us the name at all.
 	 */
 	if (p == NULL)
-		return SVC_DENIED;
+		return 0;
 
 	/* Expect a GSS_C_NT_HOSTBASED_NAME like "nfs@serverhostname" */
 
 	if (memcmp(p, "nfs@", 4) != 0)
-		return SVC_DENIED;
+		return 0;
 	p += 4;
 	if (strcmp(p, r->cl_server) != 0)
-		return SVC_DENIED;
-	return SVC_OK;
+		return 0;
+	return 1;
 }
 
-/* pg_authenticate method helper */
-static struct nfs_client *nfs_cb_find_client(struct svc_rqst *rqstp)
-{
-	struct nfs4_sessionid *sessionid = bc_xprt_sid(rqstp);
-	int is_cb_compound = rqstp->rq_proc == CB_COMPOUND ? 1 : 0;
-
-	dprintk("--> %s rq_proc %d\n", __func__, rqstp->rq_proc);
-	if (svc_is_backchannel(rqstp))
-		/* Sessionid (usually) set after CB_NULL ping */
-		return nfs4_find_client_sessionid(svc_addr(rqstp), sessionid,
-						  is_cb_compound);
-	else
-		/* No callback identifier in pg_authenticate */
-		return nfs4_find_client_no_ident(svc_addr(rqstp));
-}
-
-/* pg_authenticate method for nfsv4 callback threads. */
+/*
+ * pg_authenticate method for nfsv4 callback threads.
+ *
+ * The authflavor has been negotiated, so an incorrect flavor is a server
+ * bug. Drop packets with incorrect authflavor.
+ *
+ * All other checking done after NFS decoding where the nfs_client can be
+ * found in nfs4_callback_compound
+ */
 static int nfs_callback_authenticate(struct svc_rqst *rqstp)
 {
-	struct nfs_client *clp;
-	RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
-	int ret = SVC_OK;
-
-	/* Don't talk to strangers */
-	clp = nfs_cb_find_client(rqstp);
-	if (clp == NULL)
-		return SVC_DROP;
-
-	dprintk("%s: %s NFSv4 callback!\n", __func__,
-			svc_print_addr(rqstp, buf, sizeof(buf)));
-
 	switch (rqstp->rq_authop->flavour) {
-		case RPC_AUTH_NULL:
-			if (rqstp->rq_proc != CB_NULL)
-				ret = SVC_DENIED;
-			break;
-		case RPC_AUTH_UNIX:
-			break;
-		case RPC_AUTH_GSS:
-			ret = check_gss_callback_principal(clp, rqstp);
-			break;
-		default:
-			ret = SVC_DENIED;
+	case RPC_AUTH_NULL:
+		if (rqstp->rq_proc != CB_NULL)
+			return SVC_DROP;
+		break;
+	case RPC_AUTH_GSS:
+		/* No RPC_AUTH_GSS support yet in NFSv4.1 */
+		if (svc_is_backchannel(rqstp))
+			return SVC_DROP;
 	}
-	nfs_put_client(clp);
-	return ret;
+	return SVC_OK;
 }
 
 /*
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index d3b44f9..46d93ce 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -7,6 +7,7 @@
  */
 #ifndef __LINUX_FS_NFS_CALLBACK_H
 #define __LINUX_FS_NFS_CALLBACK_H
+#include <linux/sunrpc/svc.h>
 
 #define NFS4_CALLBACK 0x40000000
 #define NFS4_CALLBACK_XDRSIZE 2048
@@ -37,7 +38,6 @@
 struct cb_process_state {
 	__be32			drc_status;
 	struct nfs_client	*clp;
-	struct nfs4_sessionid	*svc_sid; /* v4.1 callback service sessionid */
 };
 
 struct cb_compound_hdr_arg {
@@ -168,7 +168,7 @@
 extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses);
 extern void nfs4_cb_take_slot(struct nfs_client *clp);
 #endif /* CONFIG_NFS_V4_1 */
-
+extern int check_gss_callback_principal(struct nfs_client *, struct svc_rqst *);
 extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args,
 				    struct cb_getattrres *res,
 				    struct cb_process_state *cps);
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 4bb91cb..8958757 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -373,17 +373,11 @@
 {
 	struct nfs_client *clp;
 	int i;
-	__be32 status;
+	__be32 status = htonl(NFS4ERR_BADSESSION);
 
 	cps->clp = NULL;
 
-	status = htonl(NFS4ERR_BADSESSION);
-	/* Incoming session must match the callback session */
-	if (memcmp(&args->csa_sessionid, cps->svc_sid, NFS4_MAX_SESSIONID_LEN))
-		goto out;
-
-	clp = nfs4_find_client_sessionid(args->csa_addr,
-					 &args->csa_sessionid, 1);
+	clp = nfs4_find_client_sessionid(args->csa_addr, &args->csa_sessionid);
 	if (clp == NULL)
 		goto out;
 
@@ -414,9 +408,9 @@
 	res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
 	res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
 	nfs4_cb_take_slot(clp);
-	cps->clp = clp; /* put in nfs4_callback_compound */
 
 out:
+	cps->clp = clp; /* put in nfs4_callback_compound */
 	for (i = 0; i < args->csa_nrclists; i++)
 		kfree(args->csa_rclists[i].rcl_refcalls);
 	kfree(args->csa_rclists);
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 23112c2..14e0f93 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -794,10 +794,9 @@
 
 	if (hdr_arg.minorversion == 0) {
 		cps.clp = nfs4_find_client_ident(hdr_arg.cb_ident);
-		if (!cps.clp)
+		if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
 			return rpc_drop_reply;
-	} else
-		cps.svc_sid = bc_xprt_sid(rqstp);
+	}
 
 	hdr_res.taglen = hdr_arg.taglen;
 	hdr_res.tag = hdr_arg.tag;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 192f2f8..bd3ca32 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1206,16 +1206,11 @@
  * For CB_COMPOUND calls, find a client by IP address, protocol version,
  * minorversion, and sessionID
  *
- * CREATE_SESSION triggers a CB_NULL ping from servers. The callback service
- * sessionid can only be set after the CREATE_SESSION return, so a CB_NULL
- * can arrive before the callback sessionid is set. For CB_NULL calls,
- * find a client by IP address protocol version, and minorversion.
- *
  * Returns NULL if no such client
  */
 struct nfs_client *
 nfs4_find_client_sessionid(const struct sockaddr *addr,
-			   struct nfs4_sessionid *sid, int is_cb_compound)
+			   struct nfs4_sessionid *sid)
 {
 	struct nfs_client *clp;
 
@@ -1227,9 +1222,9 @@
 		if (!nfs4_has_session(clp))
 			continue;
 
-		/* Match sessionid unless cb_null call*/
-		if (is_cb_compound && (memcmp(clp->cl_session->sess_id.data,
-		    sid->data, NFS4_MAX_SESSIONID_LEN) != 0))
+		/* Match sessionid */
+		if (memcmp(clp->cl_session->sess_id.data,
+		    sid->data, NFS4_MAX_SESSIONID_LEN) != 0)
 			continue;
 
 		atomic_inc(&clp->cl_count);
@@ -1244,7 +1239,7 @@
 
 struct nfs_client *
 nfs4_find_client_sessionid(const struct sockaddr *addr,
-			   struct nfs4_sessionid *sid, int is_cb_compound)
+			   struct nfs4_sessionid *sid)
 {
 	return NULL;
 }
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 364e432..bbbc6bf 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -23,8 +23,6 @@
 
 static void nfs_do_free_delegation(struct nfs_delegation *delegation)
 {
-	if (delegation->cred)
-		put_rpccred(delegation->cred);
 	kfree(delegation);
 }
 
@@ -37,6 +35,10 @@
 
 static void nfs_free_delegation(struct nfs_delegation *delegation)
 {
+	if (delegation->cred) {
+		put_rpccred(delegation->cred);
+		delegation->cred = NULL;
+	}
 	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
 }
 
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e6ace0d..9943a75 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -407,15 +407,18 @@
 		pos += vec->iov_len;
 	}
 
+	/*
+	 * If no bytes were started, return the error, and let the
+	 * generic layer handle the completion.
+	 */
+	if (requested_bytes == 0) {
+		nfs_direct_req_release(dreq);
+		return result < 0 ? result : -EIO;
+	}
+
 	if (put_dreq(dreq))
 		nfs_direct_complete(dreq);
-
-	if (requested_bytes != 0)
-		return 0;
-
-	if (result < 0)
-		return result;
-	return -EIO;
+	return 0;
 }
 
 static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
@@ -841,15 +844,18 @@
 		pos += vec->iov_len;
 	}
 
+	/*
+	 * If no bytes were started, return the error, and let the
+	 * generic layer handle the completion.
+	 */
+	if (requested_bytes == 0) {
+		nfs_direct_req_release(dreq);
+		return result < 0 ? result : -EIO;
+	}
+
 	if (put_dreq(dreq))
 		nfs_direct_write_complete(dreq, dreq->inode);
-
-	if (requested_bytes != 0)
-		return 0;
-
-	if (result < 0)
-		return result;
-	return -EIO;
+	return 0;
 }
 
 static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
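
Both direct-I/O submission paths above now drop the request reference and return the error themselves when no bytes were started, rather than leaving an empty request for the completion side. The underlying idiom is a submitter-held reference that is only handed over once at least one unit of work is in flight; a stripped-down sketch of that refcounting, with hypothetical names rather than the NFS API:

	#include <stdatomic.h>
	#include <stdio.h>

	struct request {
		atomic_int refs;	/* submitter holds one ref; each in-flight unit holds one */
		int error;
	};

	static void free_request(struct request *req)
	{
		printf("request %p released\n", (void *)req);
	}

	static void put_request(struct request *req)
	{
		if (atomic_fetch_sub(&req->refs, 1) == 1)
			free_request(req);
	}

	/* Returns 0 if at least one unit was started; otherwise drops the
	 * submitter's reference and returns the error directly, mirroring the
	 * requested_bytes == 0 branch added above. */
	static int submit(struct request *req, int nunits, int simulate_failure)
	{
		int started = 0, result = 0;

		for (int i = 0; i < nunits; i++) {
			if (simulate_failure) {	/* pretend the unit could not be started */
				result = simulate_failure;
				break;
			}
			atomic_fetch_add(&req->refs, 1);
			started++;
		}

		if (started == 0) {
			put_request(req);
			return result ? result : -1;
		}

		put_request(req);	/* completion is now driven by the in-flight units */
		return 0;
	}

	int main(void)
	{
		struct request req = { .refs = 1 };

		printf("submit -> %d\n", submit(&req, 4, -5));
		return 0;
	}
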
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index d851242..1cc600e 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -881,9 +881,10 @@
 	return ret;
 }
 
-static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
+	unsigned long ret = 0;
 
 	if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE)
 			&& (fattr->valid & NFS_ATTR_FATTR_CHANGE)
@@ -891,25 +892,32 @@
 		nfsi->change_attr = fattr->change_attr;
 		if (S_ISDIR(inode->i_mode))
 			nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+		ret |= NFS_INO_INVALID_ATTR;
 	}
 	/* If we have atomic WCC data, we may update some attributes */
 	if ((fattr->valid & NFS_ATTR_FATTR_PRECTIME)
 			&& (fattr->valid & NFS_ATTR_FATTR_CTIME)
-			&& timespec_equal(&inode->i_ctime, &fattr->pre_ctime))
-			memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
+			&& timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) {
+		memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
+		ret |= NFS_INO_INVALID_ATTR;
+	}
 
 	if ((fattr->valid & NFS_ATTR_FATTR_PREMTIME)
 			&& (fattr->valid & NFS_ATTR_FATTR_MTIME)
 			&& timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) {
-			memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
-			if (S_ISDIR(inode->i_mode))
-				nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+		memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
+		if (S_ISDIR(inode->i_mode))
+			nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+		ret |= NFS_INO_INVALID_ATTR;
 	}
 	if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
 			&& (fattr->valid & NFS_ATTR_FATTR_SIZE)
 			&& i_size_read(inode) == nfs_size_to_loff_t(fattr->pre_size)
-			&& nfsi->npages == 0)
-			i_size_write(inode, nfs_size_to_loff_t(fattr->size));
+			&& nfsi->npages == 0) {
+		i_size_write(inode, nfs_size_to_loff_t(fattr->size));
+		ret |= NFS_INO_INVALID_ATTR;
+	}
+	return ret;
 }
 
 /**
@@ -1223,7 +1231,7 @@
 			| NFS_INO_REVAL_PAGECACHE);
 
 	/* Do atomic weak cache consistency updates */
-	nfs_wcc_update_inode(inode, fattr);
+	invalid |= nfs_wcc_update_inode(inode, fattr);
 
 	/* More cache consistency checks */
 	if (fattr->valid & NFS_ATTR_FATTR_CHANGE) {
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 4644f04..cf9fdbd 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -133,8 +133,7 @@
 extern struct nfs_client *nfs4_find_client_no_ident(const struct sockaddr *);
 extern struct nfs_client *nfs4_find_client_ident(int);
 extern struct nfs_client *
-nfs4_find_client_sessionid(const struct sockaddr *, struct nfs4_sessionid *,
-			   int);
+nfs4_find_client_sessionid(const struct sockaddr *, struct nfs4_sessionid *);
 extern struct nfs_server *nfs_create_server(
 					const struct nfs_parsed_mount_data *,
 					struct nfs_fh *);
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 9f88c5f..2743427 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -311,8 +311,8 @@
 	if (!nfs_server_capable(inode, NFS_CAP_ACLS))
 		goto out;
 
-	/* We are doing this here, because XDR marshalling can only
-	   return -ENOMEM. */
+	/* We are doing this here because XDR marshalling does not
+	 * return any results; it BUGs. */
 	status = -ENOSPC;
 	if (acl != NULL && acl->a_count > NFS_ACL_MAX_ENTRIES)
 		goto out;
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 01c5e8b..183c6b1 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -1328,10 +1328,13 @@
 
 	encode_nfs_fh3(xdr, NFS_FH(args->inode));
 	encode_uint32(xdr, args->mask);
-	if (args->npages != 0)
-		xdr_write_pages(xdr, args->pages, 0, args->len);
 
 	base = req->rq_slen;
+	if (args->npages != 0)
+		xdr_write_pages(xdr, args->pages, 0, args->len);
+	else
+		xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE);
+
 	error = nfsacl_encode(xdr->buf, base, args->inode,
 			    (args->mask & NFS_ACL) ?
 			    args->acl_access : NULL, 1, 0);
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index 51fe64a..f5c9b12 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -214,7 +214,7 @@
 
 	/* ipv6 length plus port is legal */
 	if (rlen > INET6_ADDRSTRLEN + 8) {
-		dprintk("%s Invalid address, length %d\n", __func__,
+		dprintk("%s: Invalid address, length %d\n", __func__,
 			rlen);
 		goto out_err;
 	}
@@ -225,6 +225,11 @@
 	/* replace the port dots with dashes for the in4_pton() delimiter*/
 	for (i = 0; i < 2; i++) {
 		char *res = strrchr(buf, '.');
+		if (!res) {
+			dprintk("%s: Failed finding expected dots in port\n",
+				__func__);
+			goto out_free;
+		}
 		*res = '-';
 	}
 
@@ -240,7 +245,7 @@
 	port = htons((tmp[0] << 8) | (tmp[1]));
 
 	ds = nfs4_pnfs_ds_add(inode, ip_addr, port);
-	dprintk("%s Decoded address and port %s\n", __func__, buf);
+	dprintk("%s: Decoded address and port %s\n", __func__, buf);
 out_free:
 	kfree(buf);
 out_err:
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 9d992b0..78936a8 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -50,6 +50,7 @@
 #include <linux/module.h>
 #include <linux/sunrpc/bc_xprt.h>
 #include <linux/xattr.h>
+#include <linux/utsname.h>
 
 #include "nfs4_fs.h"
 #include "delegation.h"
@@ -4572,27 +4573,16 @@
 	*p = htonl((u32)clp->cl_boot_time.tv_nsec);
 	args.verifier = &verifier;
 
-	while (1) {
-		args.id_len = scnprintf(args.id, sizeof(args.id),
-					"%s/%s %u",
-					clp->cl_ipaddr,
-					rpc_peeraddr2str(clp->cl_rpcclient,
-							 RPC_DISPLAY_ADDR),
-					clp->cl_id_uniquifier);
+	args.id_len = scnprintf(args.id, sizeof(args.id),
+				"%s/%s.%s/%u",
+				clp->cl_ipaddr,
+				init_utsname()->nodename,
+				init_utsname()->domainname,
+				clp->cl_rpcclient->cl_auth->au_flavor);
 
-		status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
-
-		if (status != -NFS4ERR_CLID_INUSE)
-			break;
-
-		if (signalled())
-			break;
-
-		if (++clp->cl_id_uniquifier == 0)
-			break;
-	}
-
-	status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
+	status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
+	if (!status)
+		status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
 	dprintk("<-- %s status= %d\n", __func__, status);
 	return status;
 }
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 2336d53..e6742b5 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -232,12 +232,6 @@
 	status = nfs4_proc_create_session(clp);
 	if (status != 0)
 		goto out;
-	status = nfs4_set_callback_sessionid(clp);
-	if (status != 0) {
-		printk(KERN_WARNING "Sessionid not set. No callback service\n");
-		nfs_callback_down(1);
-		status = 0;
-	}
 	nfs41_setup_state_renewal(clp);
 	nfs_mark_client_ready(clp, NFS_CS_READY);
 out:
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 2ab8e5c..4e2c168 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -6086,11 +6086,11 @@
 	__be32 *p = xdr_inline_decode(xdr, 4);
 	if (unlikely(!p))
 		goto out_overflow;
-	if (!ntohl(*p++)) {
+	if (*p == xdr_zero) {
 		p = xdr_inline_decode(xdr, 4);
 		if (unlikely(!p))
 			goto out_overflow;
-		if (!ntohl(*p++))
+		if (*p == xdr_zero)
 			return -EAGAIN;
 		entry->eof = 1;
 		return -EBADCOOKIE;
@@ -6101,7 +6101,7 @@
 		goto out_overflow;
 	entry->prev_cookie = entry->cookie;
 	p = xdr_decode_hyper(p, &entry->cookie);
-	entry->len = ntohl(*p++);
+	entry->len = be32_to_cpup(p);
 
 	p = xdr_inline_decode(xdr, entry->len);
 	if (unlikely(!p))
@@ -6132,9 +6132,6 @@
 	if (entry->fattr->valid & NFS_ATTR_FATTR_TYPE)
 		entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
 
-	if (verify_attr_len(xdr, p, len) < 0)
-		goto out_overflow;
-
 	return 0;
 
 out_overflow:
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index bc40897..1b1bc1a 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -951,7 +951,7 @@
 {
 	struct pnfs_deviceid_cache *local = clp->cl_devid_cache;
 
-	dprintk("--> %s cl_devid_cache %p\n", __func__, clp->cl_devid_cache);
+	dprintk("--> %s ({%d})\n", __func__, atomic_read(&local->dc_ref));
 	if (atomic_dec_and_lock(&local->dc_ref, &clp->cl_lock)) {
 		int i;
 		/* Verify cache is empty */
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 10d648e..c8278f4 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -932,7 +932,7 @@
 	while (!list_empty(&list)) {
 		data = list_entry(list.next, struct nfs_write_data, pages);
 		list_del(&data->pages);
-		nfs_writedata_release(data);
+		nfs_writedata_free(data);
 	}
 	nfs_redirty_request(req);
 	return -ENOMEM;
diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c
index fc1c525..84c27d6 100644
--- a/fs/nfs_common/nfsacl.c
+++ b/fs/nfs_common/nfsacl.c
@@ -42,6 +42,11 @@
 	gid_t gid;
 };
 
+struct nfsacl_simple_acl {
+	struct posix_acl acl;
+	struct posix_acl_entry ace[4];
+};
+
 static int
 xdr_nfsace_encode(struct xdr_array2_desc *desc, void *elem)
 {
@@ -72,9 +77,20 @@
 	return 0;
 }
 
-unsigned int
-nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
-	      struct posix_acl *acl, int encode_entries, int typeflag)
+/**
+ * nfsacl_encode - Encode an NFSv3 ACL
+ *
+ * @buf: destination xdr_buf to contain XDR encoded ACL
+ * @base: byte offset in xdr_buf where XDR'd ACL begins
+ * @inode: inode of file whose ACL this is
+ * @acl: posix_acl to encode
+ * @encode_entries: whether to encode ACEs as well
+ * @typeflag: ACL type: NFS_ACL_DEFAULT or zero
+ *
+ * Returns size of encoded ACL in bytes or a negative errno value.
+ */
+int nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
+		  struct posix_acl *acl, int encode_entries, int typeflag)
 {
 	int entries = (acl && acl->a_count) ? max_t(int, acl->a_count, 4) : 0;
 	struct nfsacl_encode_desc nfsacl_desc = {
@@ -88,17 +104,22 @@
 		.uid = inode->i_uid,
 		.gid = inode->i_gid,
 	};
+	struct nfsacl_simple_acl aclbuf;
 	int err;
-	struct posix_acl *acl2 = NULL;
 
 	if (entries > NFS_ACL_MAX_ENTRIES ||
 	    xdr_encode_word(buf, base, entries))
 		return -EINVAL;
 	if (encode_entries && acl && acl->a_count == 3) {
-		/* Fake up an ACL_MASK entry. */
-		acl2 = posix_acl_alloc(4, GFP_KERNEL);
-		if (!acl2)
-			return -ENOMEM;
+		struct posix_acl *acl2 = &aclbuf.acl;
+
+		/* Avoid the use of posix_acl_alloc().  nfsacl_encode() is
+		 * invoked in contexts where a memory allocation failure is
+		 * fatal.  Fortunately this fake ACL is small enough to
+		 * construct on the stack. */
+		memset(acl2, 0, sizeof(*acl2));
+		posix_acl_init(acl2, 4);
+
 		/* Insert entries in canonical order: other orders seem
 		 to confuse Solaris VxFS. */
 		acl2->a_entries[0] = acl->a_entries[0];  /* ACL_USER_OBJ */
@@ -109,8 +130,6 @@
 		nfsacl_desc.acl = acl2;
 	}
 	err = xdr_encode_array2(buf, base + 4, &nfsacl_desc.desc);
-	if (acl2)
-		posix_acl_release(acl2);
 	if (!err)
 		err = 8 + nfsacl_desc.desc.elem_size *
 			  nfsacl_desc.desc.array_len;
@@ -224,9 +243,18 @@
 	return 0;
 }
 
-unsigned int
-nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
-	      struct posix_acl **pacl)
+/**
+ * nfsacl_decode - Decode an NFSv3 ACL
+ *
+ * @buf: xdr_buf containing XDR'd ACL data to decode
+ * @base: byte offset in xdr_buf where XDR'd ACL begins
+ * @aclcnt: count of ACEs in decoded posix_acl
+ * @pacl: buffer in which to place decoded posix_acl
+ *
+ * Returns the length of the decoded ACL in bytes, or a negative errno value.
+ */
+int nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
+		  struct posix_acl **pacl)
 {
 	struct nfsacl_decode_desc nfsacl_desc = {
 		.desc = {
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 3be975e..cde36cb 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -484,7 +484,7 @@
 out:
 	return status;
 out_default:
-	return nfs_cb_stat_to_errno(status);
+	return nfs_cb_stat_to_errno(nfserr);
 }
 
 /*
@@ -564,11 +564,9 @@
 	if (unlikely(status))
 		goto out;
 	if (unlikely(nfserr != NFS4_OK))
-		goto out_default;
+		status = nfs_cb_stat_to_errno(nfserr);
 out:
 	return status;
-out_default:
-	return nfs_cb_stat_to_errno(status);
 }
 
 /*
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index d98d021..54b60bf 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -230,9 +230,6 @@
 	dp->dl_client = clp;
 	get_nfs4_file(fp);
 	dp->dl_file = fp;
-	dp->dl_vfs_file = find_readable_file(fp);
-	get_file(dp->dl_vfs_file);
-	dp->dl_flock = NULL;
 	dp->dl_type = type;
 	dp->dl_stateid.si_boot = boot_time;
 	dp->dl_stateid.si_stateownerid = current_delegid++;
@@ -241,8 +238,6 @@
 	fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
 	dp->dl_time = 0;
 	atomic_set(&dp->dl_count, 1);
-	list_add(&dp->dl_perfile, &fp->fi_delegations);
-	list_add(&dp->dl_perclnt, &clp->cl_delegations);
 	INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
 	return dp;
 }
@@ -253,36 +248,30 @@
 	if (atomic_dec_and_test(&dp->dl_count)) {
 		dprintk("NFSD: freeing dp %p\n",dp);
 		put_nfs4_file(dp->dl_file);
-		fput(dp->dl_vfs_file);
 		kmem_cache_free(deleg_slab, dp);
 		num_delegations--;
 	}
 }
 
-/* Remove the associated file_lock first, then remove the delegation.
- * lease_modify() is called to remove the FS_LEASE file_lock from
- * the i_flock list, eventually calling nfsd's lock_manager
- * fl_release_callback.
- */
-static void
-nfs4_close_delegation(struct nfs4_delegation *dp)
+static void nfs4_put_deleg_lease(struct nfs4_file *fp)
 {
-	dprintk("NFSD: close_delegation dp %p\n",dp);
-	/* XXX: do we even need this check?: */
-	if (dp->dl_flock)
-		vfs_setlease(dp->dl_vfs_file, F_UNLCK, &dp->dl_flock);
+	if (atomic_dec_and_test(&fp->fi_delegees)) {
+		vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
+		fp->fi_lease = NULL;
+		fp->fi_deleg_file = NULL;
+	}
 }
 
 /* Called under the state lock. */
 static void
 unhash_delegation(struct nfs4_delegation *dp)
 {
-	list_del_init(&dp->dl_perfile);
 	list_del_init(&dp->dl_perclnt);
 	spin_lock(&recall_lock);
+	list_del_init(&dp->dl_perfile);
 	list_del_init(&dp->dl_recall_lru);
 	spin_unlock(&recall_lock);
-	nfs4_close_delegation(dp);
+	nfs4_put_deleg_lease(dp->dl_file);
 	nfs4_put_delegation(dp);
 }
 
@@ -958,8 +947,6 @@
 	spin_lock(&recall_lock);
 	while (!list_empty(&clp->cl_delegations)) {
 		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
-		dprintk("NFSD: expire client. dp %p, fp %p\n", dp,
-				dp->dl_flock);
 		list_del_init(&dp->dl_perclnt);
 		list_move(&dp->dl_recall_lru, &reaplist);
 	}
@@ -2078,6 +2065,7 @@
 		fp->fi_inode = igrab(ino);
 		fp->fi_id = current_fileid++;
 		fp->fi_had_conflict = false;
+		fp->fi_lease = NULL;
 		memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
 		memset(fp->fi_access, 0, sizeof(fp->fi_access));
 		spin_lock(&recall_lock);
@@ -2329,23 +2317,8 @@
 		nfs4_file_put_access(fp, O_RDONLY);
 }
 
-/*
- * Spawn a thread to perform a recall on the delegation represented
- * by the lease (file_lock)
- *
- * Called from break_lease() with lock_flocks() held.
- * Note: we assume break_lease will only call this *once* for any given
- * lease.
- */
-static
-void nfsd_break_deleg_cb(struct file_lock *fl)
+static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
 {
-	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
-
-	dprintk("NFSD nfsd_break_deleg_cb: dp %p fl %p\n",dp,fl);
-	if (!dp)
-		return;
-
 	/* We're assuming the state code never drops its reference
 	 * without first removing the lease.  Since we're in this lease
 	 * callback (and since the lease code is serialized by the kernel
@@ -2353,22 +2326,35 @@
 	 * it's safe to take a reference: */
 	atomic_inc(&dp->dl_count);
 
-	spin_lock(&recall_lock);
 	list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
-	spin_unlock(&recall_lock);
 
 	/* only place dl_time is set. protected by lock_flocks*/
 	dp->dl_time = get_seconds();
 
+	nfsd4_cb_recall(dp);
+}
+
+/* Called from break_lease() with lock_flocks() held. */
+static void nfsd_break_deleg_cb(struct file_lock *fl)
+{
+	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
+	struct nfs4_delegation *dp;
+
+	BUG_ON(!fp);
+	/* We assume break_lease is only called once per lease: */
+	BUG_ON(fp->fi_had_conflict);
 	/*
 	 * We don't want the locks code to timeout the lease for us;
-	 * we'll remove it ourself if the delegation isn't returned
-	 * in time.
+	 * we'll remove it ourself if a delegation isn't returned
+	 * in time:
 	 */
 	fl->fl_break_time = 0;
 
-	dp->dl_file->fi_had_conflict = true;
-	nfsd4_cb_recall(dp);
+	spin_lock(&recall_lock);
+	fp->fi_had_conflict = true;
+	list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
+		nfsd_break_one_deleg(dp);
+	spin_unlock(&recall_lock);
 }
 
 static
@@ -2459,13 +2445,15 @@
 static struct nfs4_delegation *
 find_delegation_file(struct nfs4_file *fp, stateid_t *stid)
 {
-	struct nfs4_delegation *dp;
+	struct nfs4_delegation *dp = NULL;
 
+	spin_lock(&recall_lock);
 	list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) {
 		if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid)
-			return dp;
+			break;
 	}
-	return NULL;
+	spin_unlock(&recall_lock);
+	return dp;
 }
 
 int share_access_to_flags(u32 share_access)
@@ -2641,6 +2629,66 @@
 	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
 }
 
+static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
+{
+	struct file_lock *fl;
+
+	fl = locks_alloc_lock();
+	if (!fl)
+		return NULL;
+	locks_init_lock(fl);
+	fl->fl_lmops = &nfsd_lease_mng_ops;
+	fl->fl_flags = FL_LEASE;
+	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
+	fl->fl_end = OFFSET_MAX;
+	fl->fl_owner = (fl_owner_t)(dp->dl_file);
+	fl->fl_pid = current->tgid;
+	return fl;
+}
+
+static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
+{
+	struct nfs4_file *fp = dp->dl_file;
+	struct file_lock *fl;
+	int status;
+
+	fl = nfs4_alloc_init_lease(dp, flag);
+	if (!fl)
+		return -ENOMEM;
+	fl->fl_file = find_readable_file(fp);
+	list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
+	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
+	if (status) {
+		list_del_init(&dp->dl_perclnt);
+		locks_free_lock(fl);
+		return -ENOMEM;
+	}
+	fp->fi_lease = fl;
+	fp->fi_deleg_file = fl->fl_file;
+	get_file(fp->fi_deleg_file);
+	atomic_set(&fp->fi_delegees, 1);
+	list_add(&dp->dl_perfile, &fp->fi_delegations);
+	return 0;
+}
+
+static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
+{
+	struct nfs4_file *fp = dp->dl_file;
+
+	if (!fp->fi_lease)
+		return nfs4_setlease(dp, flag);
+	spin_lock(&recall_lock);
+	if (fp->fi_had_conflict) {
+		spin_unlock(&recall_lock);
+		return -EAGAIN;
+	}
+	atomic_inc(&fp->fi_delegees);
+	list_add(&dp->dl_perfile, &fp->fi_delegations);
+	spin_unlock(&recall_lock);
+	list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
+	return 0;
+}
+
 /*
  * Attempt to hand out a delegation.
  */
@@ -2650,7 +2698,6 @@
 	struct nfs4_delegation *dp;
 	struct nfs4_stateowner *sop = stp->st_stateowner;
 	int cb_up;
-	struct file_lock *fl;
 	int status, flag = 0;
 
 	cb_up = nfsd4_cb_channel_good(sop->so_client);
@@ -2681,36 +2728,11 @@
 	}
 
 	dp = alloc_init_deleg(sop->so_client, stp, fh, flag);
-	if (dp == NULL) {
-		flag = NFS4_OPEN_DELEGATE_NONE;
-		goto out;
-	}
-	status = -ENOMEM;
-	fl = locks_alloc_lock();
-	if (!fl)
-		goto out;
-	locks_init_lock(fl);
-	fl->fl_lmops = &nfsd_lease_mng_ops;
-	fl->fl_flags = FL_LEASE;
-	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
-	fl->fl_end = OFFSET_MAX;
-	fl->fl_owner =  (fl_owner_t)dp;
-	fl->fl_file = find_readable_file(stp->st_file);
-	BUG_ON(!fl->fl_file);
-	fl->fl_pid = current->tgid;
-	dp->dl_flock = fl;
-
-	/* vfs_setlease checks to see if delegation should be handed out.
-	 * the lock_manager callback fl_change is used
-	 */
-	if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) {
-		dprintk("NFSD: setlease failed [%d], no delegation\n", status);
-		dp->dl_flock = NULL;
-		locks_free_lock(fl);
-		unhash_delegation(dp);
-		flag = NFS4_OPEN_DELEGATE_NONE;
-		goto out;
-	}
+	if (dp == NULL)
+		goto out_no_deleg;
+	status = nfs4_set_delegation(dp, flag);
+	if (status)
+		goto out_free;
 
 	memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid));
 
@@ -2722,6 +2744,12 @@
 			&& open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
 		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
 	open->op_delegate_type = flag;
+	return;
+out_free:
+	nfs4_put_delegation(dp);
+out_no_deleg:
+	flag = NFS4_OPEN_DELEGATE_NONE;
+	goto out;
 }
 
 /*
@@ -2916,8 +2944,6 @@
 				test_val = u;
 			break;
 		}
-		dprintk("NFSD: purging unused delegation dp %p, fp %p\n",
-			            dp, dp->dl_flock);
 		list_move(&dp->dl_recall_lru, &reaplist);
 	}
 	spin_unlock(&recall_lock);
@@ -3128,7 +3154,7 @@
 			goto out;
 		renew_client(dp->dl_client);
 		if (filpp) {
-			*filpp = find_readable_file(dp->dl_file);
+			*filpp = dp->dl_file->fi_deleg_file;
 			BUG_ON(!*filpp);
 		}
 	} else { /* open or lock stateid */
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 956629b..1275b86 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -317,8 +317,8 @@
 		READ_BUF(dummy32);
 		len += (XDR_QUADLEN(dummy32) << 2);
 		READMEM(buf, dummy32);
-		if ((host_err = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
-			goto out_nfserr;
+		if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
+			return status;
 		iattr->ia_valid |= ATTR_UID;
 	}
 	if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) {
@@ -328,8 +328,8 @@
 		READ_BUF(dummy32);
 		len += (XDR_QUADLEN(dummy32) << 2);
 		READMEM(buf, dummy32);
-		if ((host_err = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
-			goto out_nfserr;
+		if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
+			return status;
 		iattr->ia_valid |= ATTR_GID;
 	}
 	if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 3074656..2d31224 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -83,8 +83,6 @@
 	atomic_t		dl_count;       /* ref count */
 	struct nfs4_client	*dl_client;
 	struct nfs4_file	*dl_file;
-	struct file		*dl_vfs_file;
-	struct file_lock	*dl_flock;
 	u32			dl_type;
 	time_t			dl_time;
 /* For recall: */
@@ -379,6 +377,9 @@
 	 */
 	atomic_t		fi_readers;
 	atomic_t		fi_writers;
+	struct file		*fi_deleg_file;
+	struct file_lock	*fi_lease;
+	atomic_t		fi_delegees;
 	struct inode		*fi_inode;
 	u32                     fi_id;      /* used with stateowner->so_id 
 					     * for stateid_hashtbl hash */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 641117f..da1d970 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -808,7 +808,7 @@
 		if (ra->p_count == 0)
 			frap = rap;
 	}
-	depth = nfsdstats.ra_size*11/10;
+	depth = nfsdstats.ra_size;
 	if (!frap) {	
 		spin_unlock(&rab->pb_lock);
 		return NULL;
@@ -1744,6 +1744,13 @@
 	host_err = nfsd_break_lease(odentry->d_inode);
 	if (host_err)
 		goto out_drop_write;
+	if (ndentry->d_inode) {
+		host_err = nfsd_break_lease(ndentry->d_inode);
+		if (host_err)
+			goto out_drop_write;
+	}
+	if (host_err)
+		goto out_drop_write;
 	host_err = vfs_rename(fdir, odentry, tdir, ndentry);
 	if (!host_err) {
 		host_err = commit_metadata(tfhp);
@@ -1812,22 +1819,22 @@
 
 	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
 	if (host_err)
-		goto out_nfserr;
+		goto out_put;
 
 	host_err = nfsd_break_lease(rdentry->d_inode);
 	if (host_err)
-		goto out_put;
+		goto out_drop_write;
 	if (type != S_IFDIR)
 		host_err = vfs_unlink(dirp, rdentry);
 	else
 		host_err = vfs_rmdir(dirp, rdentry);
+	if (!host_err)
+		host_err = commit_metadata(fhp);
+out_drop_write:
+	mnt_drop_write(fhp->fh_export->ex_path.mnt);
 out_put:
 	dput(rdentry);
 
-	if (!host_err)
-		host_err = commit_metadata(fhp);
-
-	mnt_drop_write(fhp->fh_export->ex_path.mnt);
 out_nfserr:
 	err = nfserrno(host_err);
 out:
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 0994f6a7..58fd707 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -704,7 +704,8 @@
 	sbp[0]->s_state =
 		cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS);
 	/* synchronize sbp[1] with sbp[0] */
-	memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
+	if (sbp[1])
+		memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
 	return nilfs_commit_super(sbi, NILFS_SB_COMMIT_ALL);
 }
 
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index b572b67..326e747 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -1,7 +1,7 @@
 /**
  * mft.c - NTFS kernel mft record operations. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2006 Anton Altaparmakov
+ * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.
  * Copyright (c) 2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
@@ -2576,6 +2576,8 @@
 	flush_dcache_page(page);
 	SetPageUptodate(page);
 	if (base_ni) {
+		MFT_RECORD *m_tmp;
+
 		/*
 		 * Setup the base mft record in the extent mft record.  This
 		 * completes initialization of the allocated extent mft record
@@ -2588,11 +2590,11 @@
 		 * attach it to the base inode @base_ni and map, pin, and lock
 		 * its, i.e. the allocated, mft record.
 		 */
-		m = map_extent_mft_record(base_ni, bit, &ni);
-		if (IS_ERR(m)) {
+		m_tmp = map_extent_mft_record(base_ni, bit, &ni);
+		if (IS_ERR(m_tmp)) {
 			ntfs_error(vol->sb, "Failed to map allocated extent "
 					"mft record 0x%llx.", (long long)bit);
-			err = PTR_ERR(m);
+			err = PTR_ERR(m_tmp);
 			/* Set the mft record itself not in use. */
 			m->flags &= cpu_to_le16(
 					~le16_to_cpu(MFT_RECORD_IN_USE));
@@ -2603,6 +2605,7 @@
 			ntfs_unmap_page(page);
 			goto undo_mftbmp_alloc;
 		}
+		BUG_ON(m != m_tmp);
 		/*
 		 * Make sure the allocated mft record is written out to disk.
 		 * No need to set the inode dirty because the caller is going
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 06d1f74..38f986d 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -993,8 +993,7 @@
 }
 
 /* Handle quota on quotactl */
-static int ocfs2_quota_on(struct super_block *sb, int type, int format_id,
-			  char *path)
+static int ocfs2_quota_on(struct super_block *sb, int type, int format_id)
 {
 	unsigned int feature[MAXQUOTAS] = { OCFS2_FEATURE_RO_COMPAT_USRQUOTA,
 					     OCFS2_FEATURE_RO_COMPAT_GRPQUOTA};
@@ -1013,7 +1012,7 @@
 }
 
 static const struct quotactl_ops ocfs2_quotactl_ops = {
-	.quota_on	= ocfs2_quota_on,
+	.quota_on_meta	= ocfs2_quota_on,
 	.quota_off	= ocfs2_quota_off,
 	.quota_sync	= dquot_quota_sync,
 	.get_info	= dquot_get_dqinfo,
diff --git a/fs/open.c b/fs/open.c
index e52389e..5a2c6eb 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -790,6 +790,8 @@
 
 	/* Pick up the filp from the open intent */
 	filp = nd->intent.open.file;
+	nd->intent.open.file = NULL;
+
 	/* Has the filesystem initialised the file for us? */
 	if (filp->f_path.dentry == NULL) {
 		path_get(&nd->path);
diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
index 68d6a21..11f688b 100644
--- a/fs/partitions/mac.c
+++ b/fs/partitions/mac.c
@@ -29,10 +29,9 @@
 
 int mac_partition(struct parsed_partitions *state)
 {
-	int slot = 1;
 	Sector sect;
 	unsigned char *data;
-	int blk, blocks_in_map;
+	int slot, blocks_in_map;
 	unsigned secsize;
 #ifdef CONFIG_PPC_PMAC
 	int found_root = 0;
@@ -59,10 +58,14 @@
 		put_dev_sector(sect);
 		return 0;		/* not a MacOS disk */
 	}
-	strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
 	blocks_in_map = be32_to_cpu(part->map_count);
-	for (blk = 1; blk <= blocks_in_map; ++blk) {
-		int pos = blk * secsize;
+	if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
+		put_dev_sector(sect);
+		return 0;
+	}
+	strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
+	for (slot = 1; slot <= blocks_in_map; ++slot) {
+		int pos = slot * secsize;
 		put_dev_sector(sect);
 		data = read_part_sector(state, pos/512, &sect);
 		if (!data)
@@ -113,13 +116,11 @@
 			}
 
 			if (goodness > found_root_goodness) {
-				found_root = blk;
+				found_root = slot;
 				found_root_goodness = goodness;
 			}
 		}
 #endif /* CONFIG_PPC_PMAC */
-
-		++slot;
 	}
 #ifdef CONFIG_PPC_PMAC
 	if (found_root_goodness)
diff --git a/fs/pipe.c b/fs/pipe.c
index 89e9e19..da42f7d 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -441,7 +441,7 @@
 			break;
 		}
 		if (do_wakeup) {
-			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT);
+			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
  			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 		}
 		pipe_wait(pipe);
@@ -450,7 +450,7 @@
 
 	/* Signal writers asynchronously that there is more room. */
 	if (do_wakeup) {
-		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT);
+		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 	}
 	if (ret > 0)
@@ -612,7 +612,7 @@
 			break;
 		}
 		if (do_wakeup) {
-			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN);
+			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
 			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 			do_wakeup = 0;
 		}
@@ -623,7 +623,7 @@
 out:
 	mutex_unlock(&inode->i_mutex);
 	if (do_wakeup) {
-		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN);
+		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 	}
 	if (ret > 0)
@@ -715,7 +715,7 @@
 	if (!pipe->readers && !pipe->writers) {
 		free_pipe_info(inode);
 	} else {
-		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT);
+		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 	}
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 39df95a..b1cf6bf 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -22,6 +22,7 @@
 
 #include <linux/errno.h>
 
+EXPORT_SYMBOL(posix_acl_init);
 EXPORT_SYMBOL(posix_acl_alloc);
 EXPORT_SYMBOL(posix_acl_clone);
 EXPORT_SYMBOL(posix_acl_valid);
@@ -32,6 +33,16 @@
 EXPORT_SYMBOL(posix_acl_permission);
 
 /*
+ * Init a fresh posix_acl
+ */
+void
+posix_acl_init(struct posix_acl *acl, int count)
+{
+	atomic_set(&acl->a_refcount, 1);
+	acl->a_count = count;
+}
+
+/*
  * Allocate a new ACL with the specified number of entries.
  */
 struct posix_acl *
@@ -40,10 +51,8 @@
 	const size_t size = sizeof(struct posix_acl) +
 	                    count * sizeof(struct posix_acl_entry);
 	struct posix_acl *acl = kmalloc(size, flags);
-	if (acl) {
-		atomic_set(&acl->a_refcount, 1);
-		acl->a_count = count;
-	}
+	if (acl)
+		posix_acl_init(acl, count);
 	return acl;
 }
 
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 6a00688..15af622 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -1,5 +1,5 @@
 config PROC_FS
-	bool "/proc file system support" if EMBEDDED
+	bool "/proc file system support" if EXPERT
 	default y
 	help
 	  This is a virtual file system providing information about the status
@@ -40,7 +40,7 @@
         Exports the dump image of crashed kernel in ELF format.
 
 config PROC_SYSCTL
-	bool "Sysctl support (/proc/sys)" if EMBEDDED
+	bool "Sysctl support (/proc/sys)" if EXPERT
 	depends on PROC_FS
 	select SYSCTL
 	default y
@@ -61,7 +61,7 @@
 config PROC_PAGE_MONITOR
  	default y
 	depends on PROC_FS && MMU
-	bool "Enable /proc page monitoring" if EMBEDDED
+	bool "Enable /proc page monitoring" if EXPERT
  	help
 	  Various /proc files exist to monitor process memory utilization:
 	  /proc/pid/smaps, /proc/pid/clear_refs, /proc/pid/pagemap,
diff --git a/fs/proc/array.c b/fs/proc/array.c
index df2b703..7c99c1c 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -353,9 +353,6 @@
 	task_cap(m, task);
 	task_cpus_allowed(m, task);
 	cpuset_task_status_allowed(m, task);
-#if defined(CONFIG_S390)
-	task_show_regs(m, task);
-#endif
 	task_context_switch_counts(m, task);
 	return 0;
 }
diff --git a/fs/proc/consoles.c b/fs/proc/consoles.c
index eafc22a..b701eaa 100644
--- a/fs/proc/consoles.c
+++ b/fs/proc/consoles.c
@@ -67,7 +67,7 @@
 	struct console *con;
 	loff_t off = 0;
 
-	acquire_console_sem();
+	console_lock();
 	for_each_console(con)
 		if (off++ == *pos)
 			break;
@@ -84,7 +84,7 @@
 
 static void c_stop(struct seq_file *m, void *v)
 {
-	release_console_sem();
+	console_unlock();
 }
 
 static const struct seq_operations consoles_op = {
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 84becd3..a2a622e 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2189,8 +2189,8 @@
 }
 EXPORT_SYMBOL(dquot_resume);
 
-int dquot_quota_on_path(struct super_block *sb, int type, int format_id,
-		      struct path *path)
+int dquot_quota_on(struct super_block *sb, int type, int format_id,
+		   struct path *path)
 {
 	int error = security_quota_on(path->dentry);
 	if (error)
@@ -2204,20 +2204,6 @@
 					     DQUOT_LIMITS_ENABLED);
 	return error;
 }
-EXPORT_SYMBOL(dquot_quota_on_path);
-
-int dquot_quota_on(struct super_block *sb, int type, int format_id, char *name)
-{
-	struct path path;
-	int error;
-
-	error = kern_path(name, LOOKUP_FOLLOW, &path);
-	if (!error) {
-		error = dquot_quota_on_path(sb, type, format_id, &path);
-		path_put(&path);
-	}
-	return error;
-}
 EXPORT_SYMBOL(dquot_quota_on);
 
 /*
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index b299961..b34bdb2 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -64,18 +64,15 @@
 }
 
 static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
-		         void __user *addr)
+		         struct path *path)
 {
-	char *pathname;
-	int ret = -ENOSYS;
-
-	pathname = getname(addr);
-	if (IS_ERR(pathname))
-		return PTR_ERR(pathname);
-	if (sb->s_qcop->quota_on)
-		ret = sb->s_qcop->quota_on(sb, type, id, pathname);
-	putname(pathname);
-	return ret;
+	if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_on_meta)
+		return -ENOSYS;
+	if (sb->s_qcop->quota_on_meta)
+		return sb->s_qcop->quota_on_meta(sb, type, id);
+	if (IS_ERR(path))
+		return PTR_ERR(path);
+	return sb->s_qcop->quota_on(sb, type, id, path);
 }
 
 static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
@@ -241,7 +238,7 @@
 
 /* Copy parameters and call proper function */
 static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
-		       void __user *addr)
+		       void __user *addr, struct path *path)
 {
 	int ret;
 
@@ -256,7 +253,7 @@
 
 	switch (cmd) {
 	case Q_QUOTAON:
-		return quota_quotaon(sb, type, cmd, id, addr);
+		return quota_quotaon(sb, type, cmd, id, path);
 	case Q_QUOTAOFF:
 		if (!sb->s_qcop->quota_off)
 			return -ENOSYS;
@@ -335,6 +332,7 @@
 {
 	uint cmds, type;
 	struct super_block *sb = NULL;
+	struct path path, *pathp = NULL;
 	int ret;
 
 	cmds = cmd >> SUBCMDSHIFT;
@@ -351,12 +349,27 @@
 		return -ENODEV;
 	}
 
+	/*
+	 * The path for quotaon has to be resolved before grabbing the superblock,
+	 * because that takes the s_umount semaphore, which path resolution
+	 * (think autofs) may also need, so deadlocks could arise.
+	 */
+	if (cmds == Q_QUOTAON) {
+		ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW, &path);
+		if (ret)
+			pathp = ERR_PTR(ret);
+		else
+			pathp = &path;
+	}
+
 	sb = quotactl_block(special);
 	if (IS_ERR(sb))
 		return PTR_ERR(sb);
 
-	ret = do_quotactl(sb, type, cmds, id, addr);
+	ret = do_quotactl(sb, type, cmds, id, addr, pathp);
 
 	drop_super(sb);
+	if (pathp && !IS_ERR(pathp))
+		path_put(pathp);
 	return ret;
 }
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 2575682..0aab04f 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -632,7 +632,7 @@
 static int reiserfs_release_dquot(struct dquot *);
 static int reiserfs_mark_dquot_dirty(struct dquot *);
 static int reiserfs_write_info(struct super_block *, int);
-static int reiserfs_quota_on(struct super_block *, int, int, char *);
+static int reiserfs_quota_on(struct super_block *, int, int, struct path *);
 
 static const struct dquot_operations reiserfs_quota_operations = {
 	.write_dquot = reiserfs_write_dquot,
@@ -2048,25 +2048,21 @@
  * Standard function to be called on quota_on
  */
 static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
-			     char *name)
+			     struct path *path)
 {
 	int err;
-	struct path path;
 	struct inode *inode;
 	struct reiserfs_transaction_handle th;
 
 	if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA)))
 		return -EINVAL;
 
-	err = kern_path(name, LOOKUP_FOLLOW, &path);
-	if (err)
-		return err;
 	/* Quotafile not on the same filesystem? */
-	if (path.mnt->mnt_sb != sb) {
+	if (path->mnt->mnt_sb != sb) {
 		err = -EXDEV;
 		goto out;
 	}
-	inode = path.dentry->d_inode;
+	inode = path->dentry->d_inode;
 	/* We must not pack tails for quota files on reiserfs for quota IO to work */
 	if (!(REISERFS_I(inode)->i_flags & i_nopack_mask)) {
 		err = reiserfs_unpack(inode, NULL);
@@ -2082,7 +2078,7 @@
 	/* Journaling quota? */
 	if (REISERFS_SB(sb)->s_qf_names[type]) {
 		/* Quotafile not of fs root? */
-		if (path.dentry->d_parent != sb->s_root)
+		if (path->dentry->d_parent != sb->s_root)
 			reiserfs_warning(sb, "super-6521",
 				 "Quota file not on filesystem root. "
 				 "Journalled quota will not work.");
@@ -2101,9 +2097,8 @@
 		if (err)
 			goto out;
 	}
-	err = dquot_quota_on_path(sb, type, format_id, &path);
+	err = dquot_quota_on(sb, type, format_id, path);
 out:
-	path_put(&path);
 	return err;
 }
 
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 2fb2882..8ab48bc 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -63,6 +63,14 @@
 		*length = (unsigned char) bh->b_data[*offset] |
 			(unsigned char) bh->b_data[*offset + 1] << 8;
 		*offset += 2;
+
+		if (*offset == msblk->devblksize) {
+			put_bh(bh);
+			bh = sb_bread(sb, ++(*cur_index));
+			if (bh == NULL)
+				return NULL;
+			*offset = 0;
+		}
 	}
 
 	return bh;
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c
index 856756c..c4eb400 100644
--- a/fs/squashfs/xz_wrapper.c
+++ b/fs/squashfs/xz_wrapper.c
@@ -95,12 +95,6 @@
 			if (!buffer_uptodate(bh[k]))
 				goto release_mutex;
 
-			if (avail == 0) {
-				offset = 0;
-				put_bh(bh[k++]);
-				continue;
-			}
-
 			stream->buf.in = bh[k]->b_data + offset;
 			stream->buf.in_size = avail;
 			stream->buf.in_pos = 0;
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c
index 818a5e0..4661ae2 100644
--- a/fs/squashfs/zlib_wrapper.c
+++ b/fs/squashfs/zlib_wrapper.c
@@ -82,12 +82,6 @@
 			if (!buffer_uptodate(bh[k]))
 				goto release_mutex;
 
-			if (avail == 0) {
-				offset = 0;
-				put_bh(bh[k++]);
-				continue;
-			}
-
 			stream->next_in = bh[k]->b_data + offset;
 			stream->avail_in = avail;
 			offset = 0;
diff --git a/fs/super.c b/fs/super.c
index 74e149e..7e9dd4c 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -177,6 +177,11 @@
 	struct file_system_type *fs = s->s_type;
 	if (atomic_dec_and_test(&s->s_active)) {
 		fs->kill_sb(s);
+		/*
+		 * We need to call rcu_barrier() so that all the delayed
+		 * RCU-freed inodes are flushed before we release the fs module.
+		 */
+		rcu_barrier();
 		put_filesystem(fs);
 		put_super(s);
 	} else {
diff --git a/fs/sysfs/Kconfig b/fs/sysfs/Kconfig
index f4b6758..8c41fea 100644
--- a/fs/sysfs/Kconfig
+++ b/fs/sysfs/Kconfig
@@ -1,5 +1,5 @@
 config SYSFS
-	bool "sysfs file system support" if EMBEDDED
+	bool "sysfs file system support" if EXPERT
 	default y
 	help
 	The sysfs filesystem is a virtual filesystem that the kernel uses to
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index b06ede1..f5e2a19 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -985,10 +985,22 @@
 
 		/*
 		 * Extent size must be a multiple of the appropriate block
-		 * size, if set at all.
+		 * size, if set at all. It must also be smaller than the
+		 * maximum extent size supported by the filesystem.
+		 *
+		 * Also, for non-realtime files, limit the extent size hint to
+		 * half the size of the AGs in the filesystem so alignment
+		 * doesn't result in extents larger than an AG.
 		 */
 		if (fa->fsx_extsize != 0) {
-			xfs_extlen_t	size;
+			xfs_extlen_t    size;
+			xfs_fsblock_t   extsize_fsb;
+
+			extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize);
+			if (extsize_fsb > MAXEXTLEN) {
+				code = XFS_ERROR(EINVAL);
+				goto error_return;
+			}
 
 			if (XFS_IS_REALTIME_INODE(ip) ||
 			    ((mask & FSX_XFLAGS) &&
@@ -997,6 +1009,10 @@
 				       mp->m_sb.sb_blocklog;
 			} else {
 				size = mp->m_sb.sb_blocksize;
+				if (extsize_fsb > mp->m_sb.sb_agblocks / 2) {
+					code = XFS_ERROR(EINVAL);
+					goto error_return;
+				}
 			}
 
 			if (fa->fsx_extsize % size) {
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index f8e854b..206a281 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -1863,12 +1863,14 @@
 	xfs_dquot_t	*dqpout;
 	xfs_dquot_t	*dqp;
 	int		restarts;
+	int		startagain;
 
 	restarts = 0;
 	dqpout = NULL;
 
 	/* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
-startagain:
+again:
+	startagain = 0;
 	mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
 
 	list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
@@ -1885,13 +1887,10 @@
 			ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
 
 			trace_xfs_dqreclaim_want(dqp);
-
-			xfs_dqunlock(dqp);
-			mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-				return NULL;
 			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
-			goto startagain;
+			restarts++;
+			startagain = 1;
+			goto dqunlock;
 		}
 
 		/*
@@ -1906,23 +1905,20 @@
 			ASSERT(list_empty(&dqp->q_mplist));
 			list_del_init(&dqp->q_freelist);
 			xfs_Gqm->qm_dqfrlist_cnt--;
-			xfs_dqunlock(dqp);
 			dqpout = dqp;
 			XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
-			break;
+			goto dqunlock;
 		}
 
 		ASSERT(dqp->q_hash);
 		ASSERT(!list_empty(&dqp->q_mplist));
 
 		/*
-		 * Try to grab the flush lock. If this dquot is in the process of
-		 * getting flushed to disk, we don't want to reclaim it.
+		 * Try to grab the flush lock. If this dquot is in the process
+		 * of getting flushed to disk, we don't want to reclaim it.
 		 */
-		if (!xfs_dqflock_nowait(dqp)) {
-			xfs_dqunlock(dqp);
-			continue;
-		}
+		if (!xfs_dqflock_nowait(dqp))
+			goto dqunlock;
 
 		/*
 		 * We have the flush lock so we know that this is not in the
@@ -1944,8 +1940,7 @@
 				xfs_fs_cmn_err(CE_WARN, mp,
 			"xfs_qm_dqreclaim: dquot %p flush failed", dqp);
 			}
-			xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
-			continue;
+			goto dqunlock;
 		}
 
 		/*
@@ -1967,13 +1962,8 @@
 		 */
 		if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
 			restarts++;
-			mutex_unlock(&dqp->q_hash->qh_lock);
-			xfs_dqfunlock(dqp);
-			xfs_dqunlock(dqp);
-			mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
-			if (restarts++ >= XFS_QM_RECLAIM_MAX_RESTARTS)
-				return NULL;
-			goto startagain;
+			startagain = 1;
+			goto qhunlock;
 		}
 
 		ASSERT(dqp->q_nrefs == 0);
@@ -1986,14 +1976,20 @@
 		xfs_Gqm->qm_dqfrlist_cnt--;
 		dqpout = dqp;
 		mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
+qhunlock:
 		mutex_unlock(&dqp->q_hash->qh_lock);
 dqfunlock:
 		xfs_dqfunlock(dqp);
+dqunlock:
 		xfs_dqunlock(dqp);
 		if (dqpout)
 			break;
 		if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-			return NULL;
+			break;
+		if (startagain) {
+			mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+			goto again;
+		}
 	}
 	mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
 	return dqpout;
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 0ab56b3..d0b3bc7 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -75,6 +75,22 @@
 #define XFS_ALLOC_SET_ASIDE(mp)  (4 + ((mp)->m_sb.sb_agcount * 4))
 
 /*
+ * When deciding how much space to allocate out of an AG, we limit the
+ * allocation maximum size to the size of the AG. However, we cannot use all the
+ * blocks in the AG - some are permanently used by metadata. These
+ * blocks are generally:
+ *	- the AG superblock, AGF, AGI and AGFL
+ *	- the AGF (bno and cnt) and AGI btree root blocks
+ *	- 4 blocks on the AGFL according to XFS_ALLOC_SET_ASIDE() limits
+ *
+ * The AG headers are sector sized, so the amount of space they take up is
+ * dependent on filesystem geometry. The others are all single blocks.
+ */
+#define XFS_ALLOC_AG_MAX_USABLE(mp)	\
+	((mp)->m_sb.sb_agblocks - XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)) - 7)
+
+
+/*
  * Argument structure for xfs_alloc routines.
  * This is turned into a structure to avoid having 20 arguments passed
  * down several levels of the stack.
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 4111cd3..dc3afd7 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -1038,17 +1038,34 @@
 		 * Filling in the middle part of a previous delayed allocation.
 		 * Contiguity is impossible here.
 		 * This case is avoided almost all the time.
+		 *
+		 * We start with a delayed allocation:
+		 *
+		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
+		 *  PREV @ idx
+		 *
+		 * and we are allocating:
+		 *                     +rrrrrrrrrrrrrrrrr+
+		 *			      new
+		 *
+		 * and we set it up for insertion as:
+		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
+		 *                            new
+		 *  PREV @ idx          LEFT              RIGHT
+		 *                      inserted at idx + 1
 		 */
 		temp = new->br_startoff - PREV.br_startoff;
-		trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_);
-		xfs_bmbt_set_blockcount(ep, temp);
-		r[0] = *new;
-		r[1].br_state = PREV.br_state;
-		r[1].br_startblock = 0;
-		r[1].br_startoff = new_endoff;
 		temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
-		r[1].br_blockcount = temp2;
-		xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
+		trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_);
+		xfs_bmbt_set_blockcount(ep, temp);	/* truncate PREV */
+		LEFT = *new;
+		RIGHT.br_state = PREV.br_state;
+		RIGHT.br_startblock = nullstartblock(
+				(int)xfs_bmap_worst_indlen(ip, temp2));
+		RIGHT.br_startoff = new_endoff;
+		RIGHT.br_blockcount = temp2;
+		/* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
+		xfs_iext_insert(ip, idx + 1, 2, &LEFT, state);
 		ip->i_df.if_lastex = idx + 1;
 		ip->i_d.di_nextents++;
 		if (cur == NULL)
@@ -2430,7 +2447,7 @@
 		startag = ag = 0;
 
 	pag = xfs_perag_get(mp, ag);
-	while (*blen < ap->alen) {
+	while (*blen < args->maxlen) {
 		if (!pag->pagf_init) {
 			error = xfs_alloc_pagf_init(mp, args->tp, ag,
 						    XFS_ALLOC_FLAG_TRYLOCK);
@@ -2452,7 +2469,7 @@
 			notinit = 1;
 
 		if (xfs_inode_is_filestream(ap->ip)) {
-			if (*blen >= ap->alen)
+			if (*blen >= args->maxlen)
 				break;
 
 			if (ap->userdata) {
@@ -2498,14 +2515,14 @@
 	 * If the best seen length is less than the request
 	 * length, use the best as the minimum.
 	 */
-	else if (*blen < ap->alen)
+	else if (*blen < args->maxlen)
 		args->minlen = *blen;
 	/*
-	 * Otherwise we've seen an extent as big as alen,
+	 * Otherwise we've seen an extent as big as maxlen,
 	 * use that as the minimum.
 	 */
 	else
-		args->minlen = ap->alen;
+		args->minlen = args->maxlen;
 
 	/*
 	 * set the failure fallback case to look in the selected
@@ -2573,7 +2590,9 @@
 	args.tp = ap->tp;
 	args.mp = mp;
 	args.fsbno = ap->rval;
-	args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks);
+
+	/* Trim the allocation back to the maximum an AG can fit. */
+	args.maxlen = MIN(ap->alen, XFS_ALLOC_AG_MAX_USABLE(mp));
 	args.firstblock = ap->firstblock;
 	blen = 0;
 	if (nullfb) {
@@ -2621,7 +2640,7 @@
 			/*
 			 * Adjust for alignment
 			 */
-			if (blen > args.alignment && blen <= ap->alen)
+			if (blen > args.alignment && blen <= args.maxlen)
 				args.minlen = blen - args.alignment;
 			args.minalignslop = 0;
 		} else {
@@ -2640,7 +2659,7 @@
 			 * of minlen+alignment+slop doesn't go up
 			 * between the calls.
 			 */
-			if (blen > mp->m_dalign && blen <= ap->alen)
+			if (blen > mp->m_dalign && blen <= args.maxlen)
 				nextminlen = blen - mp->m_dalign;
 			else
 				nextminlen = args.minlen;
@@ -4485,6 +4504,16 @@
 				/* Figure out the extent size, adjust alen */
 				extsz = xfs_get_extsz_hint(ip);
 				if (extsz) {
+					/*
+					 * make sure we don't exceed a single
+					 * extent length when we align the
+					 * extent by reducing the length we are
+					 * going to allocate by the maximum
+					 * amount that extent size alignment may
+					 * require.
+					 */
+					alen = XFS_FILBLKS_MIN(len,
+						   MAXEXTLEN - (2 * extsz - 1));
 					error = xfs_bmap_extsize_align(mp,
 							&got, &prev, extsz,
 							rt, eof,
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 98c6f73..6f8c21c 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -427,13 +427,15 @@
 
 		if (remove) {
 			/*
-			 * We have to remove the log item from the transaction
-			 * as we are about to release our reference to the
-			 * buffer.  If we don't, the unlock that occurs later
-			 * in xfs_trans_uncommit() will ry to reference the
+			 * If we are in a transaction context, we have to
+			 * remove the log item from the transaction as we are
+			 * about to release our reference to the buffer.  If we
+			 * don't, the unlock that occurs later in
+			 * xfs_trans_uncommit() will try to reference the
 			 * buffer which we no longer have a hold on.
 			 */
-			xfs_trans_del_item(lip);
+			if (lip->li_desc)
+				xfs_trans_del_item(lip);
 
 			/*
 			 * Since the transaction no longer refers to the buffer,
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 75f2ef6..d22e626 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -138,7 +138,8 @@
 
 	if (remove) {
 		ASSERT(!(lip->li_flags & XFS_LI_IN_AIL));
-		xfs_trans_del_item(lip);
+		if (lip->li_desc)
+			xfs_trans_del_item(lip);
 		xfs_efi_item_free(efip);
 		return;
 	}
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 55582bd..8a0f044 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -337,7 +337,12 @@
 		int shift = 0;
 		int64_t freesp;
 
-		alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size);
+		/*
+		 * rounddown_pow_of_two() returns an undefined result
+		 * if we pass in alloc_blocks = 0. Hence the "+ 1" to
+		 * ensure we always pass in a non-zero value.
+		 */
+		alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size) + 1;
 		alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN,
 					rounddown_pow_of_two(alloc_blocks));
 
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 916eb7d..3bd3291 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -191,7 +191,7 @@
 
 xlog_tid_t xfs_log_get_trans_ident(struct xfs_trans *tp);
 
-int	xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
+void	xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
 				struct xfs_log_vec *log_vector,
 				xfs_lsn_t *commit_lsn, int flags);
 bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 9dc8125..9ca59be 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -543,7 +543,7 @@
 
 	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
 	if (error)
-		goto out_abort;
+		goto out_abort_free_ticket;
 
 	/*
 	 * now that we've written the checkpoint into the log, strictly
@@ -569,8 +569,9 @@
 	}
 	spin_unlock(&cil->xc_cil_lock);
 
+	/* xfs_log_done always frees the ticket on error. */
 	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0);
-	if (error || commit_lsn == -1)
+	if (commit_lsn == -1)
 		goto out_abort;
 
 	/* attach all the transactions w/ busy extents to iclog */
@@ -600,6 +601,8 @@
 	kmem_free(new_ctx);
 	return 0;
 
+out_abort_free_ticket:
+	xfs_log_ticket_put(tic);
 out_abort:
 	xlog_cil_committed(ctx, XFS_LI_ABORTED);
 	return XFS_ERROR(EIO);
@@ -622,7 +625,7 @@
  * background commit, returns without it held once background commits are
  * allowed again.
  */
-int
+void
 xfs_log_commit_cil(
 	struct xfs_mount	*mp,
 	struct xfs_trans	*tp,
@@ -637,11 +640,6 @@
 	if (flags & XFS_TRANS_RELEASE_LOG_RES)
 		log_flags = XFS_LOG_REL_PERM_RESERV;
 
-	if (XLOG_FORCED_SHUTDOWN(log)) {
-		xlog_cil_free_logvec(log_vector);
-		return XFS_ERROR(EIO);
-	}
-
 	/*
 	 * do all the hard work of formatting items (including memory
 	 * allocation) outside the CIL context lock. This prevents stalling CIL
@@ -701,7 +699,6 @@
 	 */
 	if (push)
 		xlog_cil_push(log, 0);
-	return 0;
 }
 
 /*
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 33dbc4e..7692279 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1446,6 +1446,14 @@
  * Bulk operation version of xfs_trans_committed that takes a log vector of
  * items to insert into the AIL. This uses bulk AIL insertion techniques to
  * minimise lock traffic.
+ *
+ * If we are called with the aborted flag set, it is because a log write during
+ * a CIL checkpoint commit has failed. In this case, all the items in the
+ * checkpoint have already gone through IOP_COMMITTED and IOP_UNLOCK, which
+ * means that checkpoint commit abort handling is treated exactly the same
+ * as an iclog write error even though we haven't started any IO yet. Hence in
+ * this case all we need to do is IOP_COMMITTED processing, followed by an
+ * IOP_UNPIN(aborted) call.
  */
 void
 xfs_trans_committed_bulk(
@@ -1472,6 +1480,16 @@
 		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
 			continue;
 
+		/*
+		 * if we are aborting the operation, no point in inserting the
+		 * object into the AIL as we are in a shutdown situation.
+		 */
+		if (aborted) {
+			ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
+			IOP_UNPIN(lip, 1);
+			continue;
+		}
+
 		if (item_lsn != commit_lsn) {
 
 			/*
@@ -1503,20 +1521,24 @@
 }
 
 /*
- * Called from the trans_commit code when we notice that
- * the filesystem is in the middle of a forced shutdown.
+ * Called from the trans_commit code when we notice that the filesystem is in
+ * the middle of a forced shutdown.
+ *
+ * When we are called here, we have already pinned all the items in the
+ * transaction. However, neither IOP_COMMITTING nor IOP_UNLOCK has been called
+ * so we can simply walk the items in the transaction, unpin them with an abort
+ * flag and then free the items. Note that unpinning the items can result in
+ * them being freed immediately, so we need to use a safe list traversal method
+ * here.
  */
 STATIC void
 xfs_trans_uncommit(
 	struct xfs_trans	*tp,
 	uint			flags)
 {
-	struct xfs_log_item_desc *lidp;
+	struct xfs_log_item_desc *lidp, *n;
 
-	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
-		/*
-		 * Unpin all but those that aren't dirty.
-		 */
+	list_for_each_entry_safe(lidp, n, &tp->t_items, lid_trans) {
 		if (lidp->lid_flags & XFS_LID_DIRTY)
 			IOP_UNPIN(lidp->lid_item, 1);
 	}
@@ -1733,7 +1755,6 @@
 	int			flags)
 {
 	struct xfs_log_vec	*log_vector;
-	int			error;
 
 	/*
 	 * Get each log item to allocate a vector structure for
@@ -1744,9 +1765,7 @@
 	if (!log_vector)
 		return ENOMEM;
 
-	error = xfs_log_commit_cil(mp, tp, log_vector, commit_lsn, flags);
-	if (error)
-		return error;
+	xfs_log_commit_cil(mp, tp, log_vector, commit_lsn, flags);
 
 	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
 	xfs_trans_free(tp);
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 17714be..5b6c391 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 9cf736e..fc1575f 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index bc4a6de..ef1cef7 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index a091cab..de39915 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 65b3f58..a3252a5 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -8,7 +8,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 241b8a0..e46ec95 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -47,7 +47,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20101209
+#define ACPI_CA_VERSION                 0x20110112
 
 #include "actypes.h"
 #include "actbl.h"
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index e552635..0a66cc4 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index ad20016..7e42bfe 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index cd77aa7..7504bc9 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index d4136b2..0fc15df 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 939a431..64f838b 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index a3e334a..5af3ed5 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 5dcb953..e228893 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 572189e..5d2a5e9 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2010, Intel Corp.
+ * Copyright (C) 2000 - 2011, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 6864933..fe77e33 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -124,7 +124,8 @@
 #endif
 
 #ifdef CONFIG_EVENT_TRACING
-#define FTRACE_EVENTS()	VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
+#define FTRACE_EVENTS()	. = ALIGN(8);					\
+			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
 			*(_ftrace_events)				\
 			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
 #else
@@ -140,7 +141,8 @@
 #endif
 
 #ifdef CONFIG_FTRACE_SYSCALLS
-#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
+#define TRACE_SYSCALLS() . = ALIGN(8);					\
+			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
 			 *(__syscalls_metadata)				\
 			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
 #else
@@ -165,10 +167,8 @@
 	CPU_KEEP(exit.data)						\
 	MEM_KEEP(init.data)						\
 	MEM_KEEP(exit.data)						\
-	. = ALIGN(32);							\
-	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
+	STRUCT_ALIGN();							\
 	*(__tracepoints)						\
-	VMLINUX_SYMBOL(__stop___tracepoints) = .;			\
 	/* implement dynamic printk debug */				\
 	. = ALIGN(8);							\
 	VMLINUX_SYMBOL(__start___verbose) = .;                          \
@@ -176,13 +176,7 @@
 	VMLINUX_SYMBOL(__stop___verbose) = .;				\
 	LIKELY_PROFILE()		       				\
 	BRANCH_PROFILE()						\
-	TRACE_PRINTKS()							\
-									\
-	STRUCT_ALIGN();							\
-	FTRACE_EVENTS()							\
-									\
-	STRUCT_ALIGN();							\
-	TRACE_SYSCALLS()
+	TRACE_PRINTKS()
 
 /*
  * Data section helpers
@@ -220,6 +214,10 @@
 		VMLINUX_SYMBOL(__start_rodata) = .;			\
 		*(.rodata) *(.rodata.*)					\
 		*(__vermagic)		/* Kernel version magic */	\
+		. = ALIGN(8);						\
+		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;		\
+		*(__tracepoints_ptrs)	/* Tracepoints: pointer array */\
+		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;		\
 		*(__markers_strings)	/* Markers: strings */		\
 		*(__tracepoints_strings)/* Tracepoints: strings */	\
 	}								\
@@ -364,6 +362,13 @@
 		VMLINUX_SYMBOL(__start___param) = .;			\
 		*(__param)						\
 		VMLINUX_SYMBOL(__stop___param) = .;			\
+	}								\
+									\
+	/* Built-in module versions. */					\
+	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
+		VMLINUX_SYMBOL(__start___modver) = .;			\
+		*(__modver)						\
+		VMLINUX_SYMBOL(__stop___modver) = .;			\
 		. = ALIGN((align));					\
 		VMLINUX_SYMBOL(__end_rodata) = .;			\
 	}								\
@@ -475,6 +480,8 @@
 	KERNEL_CTORS()							\
 	*(.init.rodata)							\
 	MCOUNT_REC()							\
+	FTRACE_EVENTS()							\
+	TRACE_SYSCALLS()						\
 	DEV_DISCARD(init.rodata)					\
 	CPU_DISCARD(init.rodata)					\
 	MEM_DISCARD(init.rodata)					\
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index a4694c6..fe29aad 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1367,7 +1367,7 @@
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
 				     struct timeval *vblanktime);
-extern void drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
 extern void drm_vblank_off(struct drm_device *dev, int crtc);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index acd7fad..801be59 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -275,6 +275,7 @@
 
 /**
  * drm_crtc_funcs - control CRTCs for a given device
+ * @reset: reset CRTC after state has been invalidated (e.g. resume)
  * @dpms: control display power levels
  * @save: save CRTC state
 * @restore: restore CRTC state
@@ -302,6 +303,8 @@
 	void (*save)(struct drm_crtc *crtc); /* suspend? */
 	/* Restore CRTC state */
 	void (*restore)(struct drm_crtc *crtc); /* resume? */
+	/* Reset CRTC state */
+	void (*reset)(struct drm_crtc *crtc);
 
 	/* cursor controls */
 	int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
@@ -379,6 +382,7 @@
  * @dpms: set power state (see drm_crtc_funcs above)
  * @save: save connector state
  * @restore: restore connector state
+ * @reset: reset connector after state has been invalidated (e.g. resume)
  * @mode_valid: is this mode valid on the given connector?
  * @mode_fixup: try to fixup proposed mode for this connector
  * @mode_set: set this mode
@@ -396,6 +400,7 @@
 	void (*dpms)(struct drm_connector *connector, int mode);
 	void (*save)(struct drm_connector *connector);
 	void (*restore)(struct drm_connector *connector);
+	void (*reset)(struct drm_connector *connector);
 
 	/* Check to see if anything is attached to the connector.
 	 * @force is set to false whilst polling, true when checking the
@@ -413,6 +418,7 @@
 };
 
 struct drm_encoder_funcs {
+	void (*reset)(struct drm_encoder *encoder);
 	void (*destroy)(struct drm_encoder *encoder);
 };
 
@@ -656,6 +662,7 @@
 						   struct drm_display_mode *mode);
 extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
 extern void drm_mode_config_init(struct drm_device *dev);
+extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 extern void drm_mode_set_name(struct drm_display_mode *mode);
 extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index fe29ae3..5ff1194 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -28,7 +28,6 @@
 	{0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
 	{0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
 	{0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
-	{0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
 	{0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index e95a86b..e5c607a 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -907,6 +907,7 @@
 #define RADEON_INFO_TILING_CONFIG	0x06
 #define RADEON_INFO_WANT_HYPERZ		0x07
 #define RADEON_INFO_WANT_CMASK		0x08 /* get access to CMASK on r300 */
+#define RADEON_INFO_CLOCK_CRYSTAL_FREQ	0x09 /* clock crystal frequency */
 
 struct drm_radeon_info {
 	uint32_t		request;
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 2296d8b..b0ada6f 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -1,5 +1,6 @@
 header-y += byteorder/
 header-y += can/
+header-y += caif/
 header-y += dvb/
 header-y += hdlc/
 header-y += isdn/
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index eb176bb..a2e910e 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -306,9 +306,6 @@
 					     u32 *mask, u32 req);
 extern void acpi_early_init(void);
 
-int acpi_os_map_generic_address(struct acpi_generic_address *addr);
-void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
-
 #else	/* !CONFIG_ACPI */
 
 #define acpi_disabled 1
diff --git a/include/linux/acpi_io.h b/include/linux/acpi_io.h
new file mode 100644
index 0000000..7180013
--- /dev/null
+++ b/include/linux/acpi_io.h
@@ -0,0 +1,16 @@
+#ifndef _ACPI_IO_H_
+#define _ACPI_IO_H_
+
+#include <linux/io.h>
+#include <acpi/acpi.h>
+
+static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
+					    acpi_size size)
+{
+       return ioremap_cache(phys, size);
+}
+
+int acpi_os_map_generic_address(struct acpi_generic_address *addr);
+void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
+
+#endif
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 359df04..9d339eb 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -103,6 +103,8 @@
 #define AUDIT_BPRM_FCAPS	1321	/* Information about fcaps increasing perms */
 #define AUDIT_CAPSET		1322	/* Record showing argument to sys_capset */
 #define AUDIT_MMAP		1323	/* Record showing descriptor and flags in mmap */
+#define AUDIT_NETFILTER_PKT	1324	/* Packets traversing netfilter chains */
+#define AUDIT_NETFILTER_CFG	1325	/* Netfilter chain modifications */
 
 #define AUDIT_AVC		1400	/* SE Linux avc denial or grant */
 #define AUDIT_SELINUX_ERR	1401	/* Internal SE Linux Errors */
diff --git a/include/linux/caif/Kbuild b/include/linux/caif/Kbuild
new file mode 100644
index 0000000..a9cf250
--- /dev/null
+++ b/include/linux/caif/Kbuild
@@ -0,0 +1,2 @@
+header-y += caif_socket.h
+header-y += if_caif.h
diff --git a/include/linux/console.h b/include/linux/console.h
index 9774fe6..7453cfd 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -139,9 +139,9 @@
 extern void register_console(struct console *);
 extern int unregister_console(struct console *);
 extern struct console *console_drivers;
-extern void acquire_console_sem(void);
-extern int try_acquire_console_sem(void);
-extern void release_console_sem(void);
+extern void console_lock(void);
+extern int console_trylock(void);
+extern void console_unlock(void);
 extern void console_conditional_schedule(void);
 extern void console_unblank(void);
 extern struct tty_driver *console_device(int *);
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
new file mode 100644
index 0000000..473771a
--- /dev/null
+++ b/include/linux/cpu_rmap.h
@@ -0,0 +1,73 @@
+/*
+ * cpu_rmap.h: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+
+/**
+ * struct cpu_rmap - CPU affinity reverse-map
+ * @size: Number of objects to be reverse-mapped
+ * @used: Number of objects added
+ * @obj: Pointer to array of object pointers
+ * @near: For each CPU, the index and distance to the nearest object,
+ *      based on affinity masks
+ */
+struct cpu_rmap {
+	u16		size, used;
+	void		**obj;
+	struct {
+		u16	index;
+		u16	dist;
+	}		near[0];
+};
+#define CPU_RMAP_DIST_INF 0xffff
+
+extern struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags);
+
+/**
+ * free_cpu_rmap - free CPU affinity reverse-map
+ * @rmap: Reverse-map allocated with alloc_cpu_rmap(), or %NULL
+ */
+static inline void free_cpu_rmap(struct cpu_rmap *rmap)
+{
+	kfree(rmap);
+}
+
+extern int cpu_rmap_add(struct cpu_rmap *rmap, void *obj);
+extern int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+			   const struct cpumask *affinity);
+
+static inline u16 cpu_rmap_lookup_index(struct cpu_rmap *rmap, unsigned int cpu)
+{
+	return rmap->near[cpu].index;
+}
+
+static inline void *cpu_rmap_lookup_obj(struct cpu_rmap *rmap, unsigned int cpu)
+{
+	return rmap->obj[rmap->near[cpu].index];
+}
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+/**
+ * alloc_irq_cpu_rmap - allocate CPU affinity reverse-map for IRQs
+ * @size: Number of objects to be mapped
+ *
+ * Must be called in process context.
+ */
+static inline struct cpu_rmap *alloc_irq_cpu_rmap(unsigned int size)
+{
+	return alloc_cpu_rmap(size, GFP_KERNEL);
+}
+extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);
+
+extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);
+
+#endif
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
index 68cd248..4c5b26e 100644
--- a/include/linux/dcbnl.h
+++ b/include/linux/dcbnl.h
@@ -25,6 +25,11 @@
 /* IEEE 802.1Qaz std supported values */
 #define IEEE_8021QAZ_MAX_TCS	8
 
+#define IEEE_8021QAZ_TSA_STRICT		0
+#define IEEE_8021QAZ_TSA_CB_SHABER	1
+#define IEEE_8021QAZ_TSA_ETS		2
+#define IEEE_8021QAZ_TSA_VENDOR		255
+
 /* This structure contains the IEEE 802.1Qaz ETS managed object
  *
 * @willing: willing bit in ETS configuration TLV
@@ -101,8 +106,8 @@
  */
 struct dcb_app {
 	__u8	selector;
-	__u32	protocol;
 	__u8	priority;
+	__u16	protocol;
 };
 
 struct dcbmsg {
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 010e2d8..d638e85 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -279,8 +279,6 @@
 	DCCP_MAX_STATES
 };
 
-#define DCCP_STATE_MASK 0x1f
-
 enum {
 	DCCPF_OPEN	      = TCPF_ESTABLISHED,
 	DCCPF_REQUESTING      = TCPF_SYN_SENT,
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 1908929..54d776c 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -251,6 +251,7 @@
 	ETH_SS_STATS,
 	ETH_SS_PRIV_FLAGS,
 	ETH_SS_NTUPLE_FILTERS,
+	ETH_SS_FEATURES,
 };
 
 /* for passing string sets for data tagging */
@@ -523,6 +524,87 @@
 	char	data[ETHTOOL_FLASH_MAX_FILENAME];
 };
 
+/* for returning and changing feature sets */
+
+/**
+ * struct ethtool_get_features_block - block with state of 32 features
+ * @available: mask of changeable features
+ * @requested: mask of features requested to be enabled if possible
+ * @active: mask of currently enabled features
+ * @never_changed: mask of features not changeable for any device
+ */
+struct ethtool_get_features_block {
+	__u32	available;
+	__u32	requested;
+	__u32	active;
+	__u32	never_changed;
+};
+
+/**
+ * struct ethtool_gfeatures - command to get state of device's features
+ * @cmd: command number = %ETHTOOL_GFEATURES
+ * @size: in: number of elements in the features[] array;
+ *       out: number of elements in features[] needed to hold all features
+ * @features: state of features
+ */
+struct ethtool_gfeatures {
+	__u32	cmd;
+	__u32	size;
+	struct ethtool_get_features_block features[0];
+};
+
+/**
+ * struct ethtool_set_features_block - block with request for 32 features
+ * @valid: mask of features to be changed
+ * @requested: values of features to be changed
+ */
+struct ethtool_set_features_block {
+	__u32	valid;
+	__u32	requested;
+};
+
+/**
+ * struct ethtool_sfeatures - command to request change in device's features
+ * @cmd: command number = %ETHTOOL_SFEATURES
+ * @size: array size of the features[] array
+ * @features: feature change masks
+ */
+struct ethtool_sfeatures {
+	__u32	cmd;
+	__u32	size;
+	struct ethtool_set_features_block features[0];
+};
+
+/*
+ * %ETHTOOL_SFEATURES changes features present in features[].valid to the
+ * values of corresponding bits in features[].requested. Bits in .requested
+ * not set in .valid or not changeable are ignored.
+ *
+ * Returns %EINVAL when .valid contains undefined or never-changeable bits
+ * or size is not equal to the required number of feature words (32-bit blocks).
+ * Returns >= 0 if request was completed; bits set in the value mean:
+ *   %ETHTOOL_F_UNSUPPORTED - there were bits set in .valid that are not
+ *	changeable (not present in %ETHTOOL_GFEATURES' features[].available);
+ *	those bits were ignored.
+ *   %ETHTOOL_F_WISH - some or all changes requested were recorded but the
+ *      resulting state of bits masked by .valid is not equal to .requested.
+ *      Probably there are other device-specific constraints on some features
+ *      in the set. When %ETHTOOL_F_UNSUPPORTED is set, .valid is considered
+ *      here as though ignored bits were cleared.
+ *
+ * The meaning of the bits in the masks is obtained via the %ETHTOOL_GSSET_INFO
+ * (number of bits in the arrays - always a multiple of 32) and %ETHTOOL_GSTRINGS
+ * commands for the ETH_SS_FEATURES string set. The first entry in the table
+ * corresponds to the least significant bit in the features[0] fields. Empty
+ * strings mark undefined features.
+ */
+enum ethtool_sfeatures_retval_bits {
+	ETHTOOL_F_UNSUPPORTED__BIT,
+	ETHTOOL_F_WISH__BIT,
+};
+
+#define ETHTOOL_F_UNSUPPORTED   (1 << ETHTOOL_F_UNSUPPORTED__BIT)
+#define ETHTOOL_F_WISH          (1 << ETHTOOL_F_WISH__BIT)
+
 #ifdef __KERNEL__
 
 #include <linux/rculist.h>
@@ -543,7 +625,6 @@
 
 /* Some generic methods drivers may use in their ethtool_ops */
 u32 ethtool_op_get_link(struct net_device *dev);
-u32 ethtool_op_get_rx_csum(struct net_device *dev);
 u32 ethtool_op_get_tx_csum(struct net_device *dev);
 int ethtool_op_set_tx_csum(struct net_device *dev, u32 data);
 int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data);
@@ -744,6 +825,9 @@
 #define ETHTOOL_GRXFHINDIR	0x00000038 /* Get RX flow hash indir'n table */
 #define ETHTOOL_SRXFHINDIR	0x00000039 /* Set RX flow hash indir'n table */
 
+#define ETHTOOL_GFEATURES	0x0000003a /* Get device offload settings */
+#define ETHTOOL_SFEATURES	0x0000003b /* Change device offload settings */
+
 /* compatibility with older code */
 #define SPARC_ETH_GSET		ETHTOOL_GSET
 #define SPARC_ETH_SSET		ETHTOOL_SSET
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index da7e52b..1effc8b 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -109,7 +109,7 @@
 }
 
 /*
- * Check if the task should be counted as freezeable by the freezer
+ * Check if the task should be counted as freezable by the freezer
  */
 static inline int freezer_should_skip(struct task_struct *p)
 {
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 32b38cd..bd32159 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2555,9 +2555,12 @@
 		   void __user *buffer, size_t *lenp, loff_t *ppos);
 int __init get_filesystem_list(char *buf);
 
+#define __FMODE_EXEC		((__force int) FMODE_EXEC)
+#define __FMODE_NONOTIFY	((__force int) FMODE_NONOTIFY)
+
 #define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
 #define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
-					    (flag & FMODE_NONOTIFY)))
+					    (flag & __FMODE_NONOTIFY)))
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_FS_H */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index a3b148a..0b84c61 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -249,7 +249,7 @@
 					 ((1 << ZONES_SHIFT) - 1);
 
 	if (__builtin_constant_p(bit))
-		MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+		BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
 	else {
 #ifdef CONFIG_DEBUG_VM
 		BUG_ON((GFP_ZONE_BAD >> bit) & 1);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 8e6c8c4..df29c8f 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -57,7 +57,8 @@
 	  (transparent_hugepage_flags &					\
 	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
 	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
-	 !((__vma)->vm_flags & VM_NOHUGEPAGE))
+	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
+	 !is_vma_temporary_stack(__vma))
 #define transparent_hugepage_defrag(__vma)				\
 	((transparent_hugepage_flags &					\
 	  (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) ||			\
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 6485d2a..f4a2e6b 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -135,6 +135,7 @@
 	IFLA_VF_PORTS,
 	IFLA_PORT_SELF,
 	IFLA_AF_SPEC,
+	IFLA_GROUP,		/* Group the device belongs to */
 	__IFLA_MAX
 };
 
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index ae8fdc5..5f81466 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -144,6 +144,7 @@
 #define IN_DEV_ARP_NOTIFY(in_dev)	IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
 
 struct in_ifaddr {
+	struct hlist_node	hash;
 	struct in_ifaddr	*ifa_next;
 	struct in_device	*ifa_dev;
 	struct rcu_head		rcu_head;
diff --git a/include/linux/input/bu21013.h b/include/linux/input/bu21013.h
index e470d38..05e0328 100644
--- a/include/linux/input/bu21013.h
+++ b/include/linux/input/bu21013.h
@@ -12,8 +12,6 @@
  * @cs_en:	pointer to the cs enable function
  * @cs_dis:	pointer to the cs disable function
  * @irq_read_val:    pointer to read the pen irq value function
- * @x_max_res: xmax resolution
- * @y_max_res: ymax resolution
  * @touch_x_max: touch x max
  * @touch_y_max: touch y max
  * @cs_pin: chip select pin
@@ -29,8 +27,6 @@
 	int (*cs_en)(int reset_pin);
 	int (*cs_dis)(int reset_pin);
 	int (*irq_read_val)(void);
-	int x_max_res;
-	int y_max_res;
 	int touch_x_max;
 	int touch_y_max;
 	unsigned int cs_pin;
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 6974746..fe7c4b9 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -4,8 +4,8 @@
 #include <linux/types.h>
 #include <linux/input.h>
 
-#define MATRIX_MAX_ROWS		16
-#define MATRIX_MAX_COLS		16
+#define MATRIX_MAX_ROWS		32
+#define MATRIX_MAX_COLS		32
 
 #define KEY(row, col, val)	((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\
 				 (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 55e0d42..63c5ad7 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,8 @@
 #include <linux/smp.h>
 #include <linux/percpu.h>
 #include <linux/hrtimer.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
 
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
@@ -240,6 +242,35 @@
 extern int irq_select_affinity(unsigned int irq);
 
 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+
+/**
+ * struct irq_affinity_notify - context for notification of IRQ affinity changes
+ * @irq:		Interrupt to which notification applies
+ * @kref:		Reference count, for internal use
+ * @work:		Work item, for internal use
+ * @notify:		Function to be called on change.  This will be
+ *			called in process context.
+ * @release:		Function to be called on release.  This will be
+ *			called in process context.  Once registered, the
+ *			structure must only be freed when this function is
+ *			called or later.
+ */
+struct irq_affinity_notify {
+	unsigned int irq;
+	struct kref kref;
+	struct work_struct work;
+	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
+	void (*release)(struct kref *ref);
+};
+
+extern int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
+
+static inline void irq_run_affinity_notifiers(void)
+{
+	flush_scheduled_work();
+}
+
 #else /* CONFIG_SMP */
 
 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
@@ -255,7 +286,7 @@
 static inline int irq_select_affinity(unsigned int irq)  { return 0; }
 
 static inline int irq_set_affinity_hint(unsigned int irq,
-                                        const struct cpumask *m)
+					const struct cpumask *m)
 {
 	return -EINVAL;
 }
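A hedged in-kernel sketch of using the affinity notifier interface added above; the embedding my_ctx structure, the callback bodies and the allocation strategy are assumptions, not part of the patch:

#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_ctx {
	struct irq_affinity_notify notify;
	/* driver-private state would live here */
};

static void my_affinity_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	struct my_ctx *ctx = container_of(notify, struct my_ctx, notify);

	/* Process context: e.g. re-point per-CPU resources at 'mask'. */
	(void)ctx;
}

static void my_affinity_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);
	struct my_ctx *ctx = container_of(notify, struct my_ctx, notify);

	kfree(ctx);	/* only now is it safe to free the structure */
}

static int my_register_notifier(unsigned int irq)
{
	struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->notify.notify = my_affinity_notify;
	ctx->notify.release = my_affinity_release;
	return irq_set_affinity_notifier(irq, &ctx->notify);
}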
diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h
index 5f43a3b..4deb383 100644
--- a/include/linux/ip_vs.h
+++ b/include/linux/ip_vs.h
@@ -89,6 +89,14 @@
 #define IP_VS_CONN_F_TEMPLATE	0x1000		/* template, not connection */
 #define IP_VS_CONN_F_ONE_PACKET	0x2000		/* forward only one packet */
 
+#define IP_VS_CONN_F_BACKUP_MASK (IP_VS_CONN_F_FWD_MASK | \
+				  IP_VS_CONN_F_NOOUTPUT | \
+				  IP_VS_CONN_F_INACTIVE | \
+				  IP_VS_CONN_F_SEQ_MASK | \
+				  IP_VS_CONN_F_NO_CPORT | \
+				  IP_VS_CONN_F_TEMPLATE \
+				 )
+
 /* Flags that are not sent to backup server start from bit 16 */
 #define IP_VS_CONN_F_NFCT	(1 << 16)	/* use netfilter conntrack */
 
diff --git a/include/linux/irq.h b/include/linux/irq.h
index abde252..80fcb53 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -74,7 +74,8 @@
 
 #define IRQF_MODIFY_MASK	\
 	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
-	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL)
+	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
+	 IRQ_PER_CPU)
 
 #ifdef CONFIG_IRQ_PER_CPU
 # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 6a64c6f..bfef56d 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -8,6 +8,7 @@
  * For now it's included from <linux/irq.h>
  */
 
+struct irq_affinity_notify;
 struct proc_dir_entry;
 struct timer_rand_state;
 /**
@@ -24,6 +25,7 @@
  * @last_unhandled:	aging timer for unhandled count
  * @irqs_unhandled:	stats field for spurious unhandled interrupts
  * @lock:		locking for SMP
+ * @affinity_notify:	context for notification of affinity changes
  * @pending_mask:	pending rebalanced interrupts
  * @threads_active:	number of irqaction threads currently running
  * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
@@ -70,6 +72,7 @@
 	raw_spinlock_t		lock;
 #ifdef CONFIG_SMP
 	const struct cpumask	*affinity_hint;
+	struct irq_affinity_notify *affinity_notify;
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	cpumask_var_t		pending_mask;
 #endif
@@ -101,13 +104,6 @@
 #define get_irq_desc_msi(desc)		((desc)->irq_data.msi_desc)
 
 /*
- * Monolithic do_IRQ implementation.
- */
-#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-extern unsigned int __do_IRQ(unsigned int irq);
-#endif
-
-/*
  * Architectures call this to let the generic IRQ layer
  * handle an interrupt. If the descriptor is attached to an
  * irqchip-style controller then we call the ->handle_irq() handler,
@@ -115,14 +111,7 @@
  */
 static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
-#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
 	desc->handle_irq(irq, desc);
-#else
-	if (likely(desc->handle_irq))
-		desc->handle_irq(irq, desc);
-	else
-		__do_IRQ(irq);
-#endif
 }
 
 static inline void generic_handle_irq(unsigned int irq)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5a9d905..2fe6e84 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -243,6 +243,8 @@
 extern unsigned long get_taint(void);
 extern int root_mountflags;
 
+extern bool early_boot_irqs_disabled;
+
 /* Values used for system_state */
 extern enum system_states {
 	SYSTEM_BOOTING,
@@ -573,12 +575,6 @@
 	char _f[20-2*sizeof(long)-sizeof(int)];	/* Padding: libc5 uses this.. */
 };
 
-/* Force a compilation error if condition is true */
-#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition))
-
-/* Force a compilation error if condition is constant and true */
-#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
-
 /* Force a compilation error if a constant expression is not a power of 2 */
 #define BUILD_BUG_ON_NOT_POWER_OF_2(n)			\
 	BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
@@ -590,6 +586,32 @@
 #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
 #define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
 
+/**
+ * BUILD_BUG_ON - break compile if a condition is true.
+ * @condition: the condition which the compiler should know is false.
+ *
+ * If you have some code which relies on certain constants being equal, or
+ * other compile-time-evaluated condition, you should use BUILD_BUG_ON to
+ * detect if someone changes it.
+ *
+ * The implementation uses gcc's reluctance to create a negative array, but
+ * gcc (as of 4.4) only emits that error for obvious cases (eg. not arguments
+ * to inline functions).  So as a fallback we use the optimizer; if it can't
+ * prove the condition is false, it will cause a link error on the undefined
+ * "__build_bug_on_failed".  This error message can be harder to track down
+ * though, hence the two different methods.
+ */
+#ifndef __OPTIMIZE__
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#else
+extern int __build_bug_on_failed;
+#define BUILD_BUG_ON(condition)					\
+	do {							\
+		((void)sizeof(char[1 - 2*!!(condition)]));	\
+		if (condition) __build_bug_on_failed = 1;	\
+	} while(0)
+#endif
+
 /* Trap pasters of __FUNCTION__ at compile-time */
 #define __FUNCTION__ (__func__)
 
diff --git a/include/linux/klist.h b/include/linux/klist.h
index e91a4e5..a370ce5 100644
--- a/include/linux/klist.h
+++ b/include/linux/klist.h
@@ -22,7 +22,7 @@
 	struct list_head	k_list;
 	void			(*get)(struct klist_node *);
 	void			(*put)(struct klist_node *);
-} __attribute__ ((aligned (4)));
+} __attribute__ ((aligned (sizeof(void *))));
 
 #define KLIST_INIT(_name, _get, _put)					\
 	{ .k_lock	= __SPIN_LOCK_UNLOCKED(_name.k_lock),		\
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index 08d7dc4..39f8453 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -76,7 +76,7 @@
 									\
 		_n = (long) &((ptr)->name##_end)			\
 			- (long) &((ptr)->name##_begin);		\
-		MAYBE_BUILD_BUG_ON(_n < 0);				\
+		BUILD_BUG_ON(_n < 0);					\
 									\
 		kmemcheck_mark_initialized(&((ptr)->name##_begin), _n);	\
 	} while (0)
diff --git a/include/linux/list.h b/include/linux/list.h
index 9a5f8a7..3a54266 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -96,6 +96,11 @@
  * in an undefined state.
  */
 #ifndef CONFIG_DEBUG_LIST
+static inline void __list_del_entry(struct list_head *entry)
+{
+	__list_del(entry->prev, entry->next);
+}
+
 static inline void list_del(struct list_head *entry)
 {
 	__list_del(entry->prev, entry->next);
@@ -103,6 +108,7 @@
 	entry->prev = LIST_POISON2;
 }
 #else
+extern void __list_del_entry(struct list_head *entry);
 extern void list_del(struct list_head *entry);
 #endif
 
@@ -135,7 +141,7 @@
  */
 static inline void list_del_init(struct list_head *entry)
 {
-	__list_del(entry->prev, entry->next);
+	__list_del_entry(entry);
 	INIT_LIST_HEAD(entry);
 }
 
@@ -146,7 +152,7 @@
  */
 static inline void list_move(struct list_head *list, struct list_head *head)
 {
-	__list_del(list->prev, list->next);
+	__list_del_entry(list);
 	list_add(list, head);
 }
 
@@ -158,7 +164,7 @@
 static inline void list_move_tail(struct list_head *list,
 				  struct list_head *head)
 {
-	__list_del(list->prev, list->next);
+	__list_del_entry(list);
 	list_add_tail(list, head);
 }
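For reference, a hedged sketch of the (unchanged) list_move() semantics now routed through __list_del_entry(): the entry is unlinked from whatever list it is currently on and re-added at the head of another. The job structure and queue names are illustrative:

#include <linux/list.h>

struct job {
	struct list_head node;
	int id;
};

static void requeue_first(struct list_head *pending, struct list_head *running)
{
	struct job *j;

	if (list_empty(pending))
		return;
	j = list_first_entry(pending, struct job, node);
	/* Unlink from 'pending' and add to the front of 'running'. */
	list_move(&j->node, running);
}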
 
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 71c09b2..4aef1dd 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -436,16 +436,8 @@
 #endif /* CONFIG_LOCKDEP */
 
 #ifdef CONFIG_TRACE_IRQFLAGS
-extern void early_boot_irqs_off(void);
-extern void early_boot_irqs_on(void);
 extern void print_irqtrace_events(struct task_struct *curr);
 #else
-static inline void early_boot_irqs_off(void)
-{
-}
-static inline void early_boot_irqs_on(void)
-{
-}
 static inline void print_irqtrace_events(struct task_struct *curr)
 {
 }
@@ -522,12 +514,15 @@
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
 #  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
+#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
 # else
 #  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
+#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
 # endif
 # define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
 #else
 # define lock_map_acquire(l)			do { } while (0)
+# define lock_map_acquire_read(l)		do { } while (0)
 # define lock_map_release(l)			do { } while (0)
 #endif
 
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6a576f9..f512e18 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -146,6 +146,10 @@
 						gfp_t gfp_mask);
 u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
+#endif
+
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
 struct mem_cgroup;
 
@@ -335,6 +339,11 @@
 	return 0;
 }
 
+static inline void mem_cgroup_split_huge_fixup(struct page *head,
+						struct page *tail)
+{
+}
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
new file mode 100644
index 0000000..dd8da34
--- /dev/null
+++ b/include/linux/micrel_phy.h
@@ -0,0 +1,16 @@
+#ifndef _MICREL_PHY_H
+#define _MICREL_PHY_H
+
+#define MICREL_PHY_ID_MASK	0x00fffff0
+
+#define PHY_ID_KSZ9021		0x00221611
+#define PHY_ID_KS8737		0x00221720
+#define PHY_ID_KS8041		0x00221510
+#define PHY_ID_KS8051		0x00221550
+/* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
+#define PHY_ID_KS8001		0x0022161A
+
+/* struct phy_device dev_flags definitions */
+#define MICREL_PHY_50MHZ_CLK	0x00000001
+
+#endif /* _MICREL_PHY_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 956a355..f6385fc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -470,6 +470,7 @@
 	page[1].lru.prev = (void *)order;
 }
 
+#ifdef CONFIG_MMU
 /*
  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
  * servicing faults for write access.  In the normal case, do always want
@@ -482,6 +483,7 @@
 		pte = pte_mkwrite(pte);
 	return pte;
 }
+#endif
 
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
index bf17350..38d3930 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/mmc/sh_mmcif.h
@@ -94,12 +94,12 @@
 
 static inline u32 sh_mmcif_readl(void __iomem *addr, int reg)
 {
-	return readl(addr + reg);
+	return __raw_readl(addr + reg);
 }
 
 static inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val)
 {
-	writel(val, addr + reg);
+	__raw_writel(val, addr + reg);
 }
 
 #define SH_MMCIF_BBS 512 /* boot block size */
diff --git a/include/linux/module.h b/include/linux/module.h
index 8b17fd8..9bdf27c 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -58,6 +58,12 @@
 	void (*free)(struct module *);
 };
 
+struct module_version_attribute {
+	struct module_attribute mattr;
+	const char *module_name;
+	const char *version;
+};
+
 struct module_kobject
 {
 	struct kobject kobj;
@@ -161,7 +167,28 @@
   Using this automatically adds a checksum of the .c files and the
   local headers in "srcversion".
 */
+
+#if defined(MODULE) || !defined(CONFIG_SYSFS)
 #define MODULE_VERSION(_version) MODULE_INFO(version, _version)
+#else
+#define MODULE_VERSION(_version)					\
+	extern ssize_t __modver_version_show(struct module_attribute *,	\
+					     struct module *, char *);	\
+	static struct module_version_attribute __modver_version_attr	\
+	__used								\
+    __attribute__ ((__section__ ("__modver"),aligned(sizeof(void *)))) \
+	= {								\
+		.mattr	= {						\
+			.attr	= {					\
+				.name	= "version",			\
+				.mode	= S_IRUGO,			\
+			},						\
+			.show	= __modver_version_show,		\
+		},							\
+		.module_name	= KBUILD_MODNAME,			\
+		.version	= _version,				\
+	}
+#endif
 
 /* Optional firmware file (or files) needed by the module
  * format is simply firmware file name.  Multiple firmware
@@ -350,7 +377,7 @@
 	   keeping pointers to this stuff */
 	char *args;
 #ifdef CONFIG_TRACEPOINTS
-	struct tracepoint *tracepoints;
+	struct tracepoint * const *tracepoints_ptrs;
 	unsigned int num_tracepoints;
 #endif
 #ifdef HAVE_JUMP_LABEL
@@ -362,7 +389,7 @@
 	unsigned int num_trace_bprintk_fmt;
 #endif
 #ifdef CONFIG_EVENT_TRACING
-	struct ftrace_event_call *trace_events;
+	struct ftrace_event_call **trace_events;
 	unsigned int num_trace_events;
 #endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 112adf8..07b4195 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -16,15 +16,17 @@
 /* Chosen so that structs with an unsigned long line up. */
 #define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
 
-#ifdef MODULE
 #define ___module_cat(a,b) __mod_ ## a ## b
 #define __module_cat(a,b) ___module_cat(a,b)
+#ifdef MODULE
 #define __MODULE_INFO(tag, name, info)					  \
 static const char __module_cat(name,__LINE__)[]				  \
   __used __attribute__((section(".modinfo"), unused, aligned(1)))	  \
   = __stringify(tag) "=" info
 #else  /* !MODULE */
-#define __MODULE_INFO(tag, name, info)
+/* This struct is here for syntactic coherency; it is not used */
+#define __MODULE_INFO(tag, name, info)					  \
+  struct __module_cat(name,__LINE__) {}
 #endif
 #define __MODULE_PARM_TYPE(name, _type)					  \
   __MODULE_INFO(parmtype, name##type, #name ":" _type)
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index 0fa7a3a..b21d567 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -150,6 +150,7 @@
 extern int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
 extern int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
 extern int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
+extern int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
 extern int ip_mr_init(void);
 #else
 static inline
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 6091ab7..9d2deb2 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -136,6 +136,7 @@
 extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
 extern int ip6_mr_input(struct sk_buff *skb);
 extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
+extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
 extern int ip6_mr_init(void);
 extern void ip6_mr_cleanup(void);
 #else
diff --git a/include/linux/net.h b/include/linux/net.h
index 16faa13..94de83c 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -118,6 +118,7 @@
 };
 
 struct socket_wq {
+	/* Note: wait MUST be first field of socket_wq */
 	wait_queue_head_t	wait;
 	struct fasync_struct	*fasync_list;
 	struct rcu_head		rcu;
@@ -142,7 +143,7 @@
 
 	unsigned long		flags;
 
-	struct socket_wq	*wq;
+	struct socket_wq __rcu	*wq;
 
 	struct file		*file;
 	struct sock		*sk;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d971346..ffe56c1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -75,6 +75,9 @@
 #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
 #define NET_RX_DROP		1	/* packet dropped */
 
+/* Initial net device group. All devices belong to group 0 by default. */
+#define INIT_NETDEV_GROUP	0
+
 /*
  * Transmit return codes: transmit return codes originate from three different
  * namespaces:
@@ -551,14 +554,16 @@
 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
 
 /*
- * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
- * tail pointer for that CPU's input queue at the time of last enqueue.
+ * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
+ * tail pointer for that CPU's input queue at the time of last enqueue, and
+ * a hardware filter index.
  */
 struct rps_dev_flow {
 	u16 cpu;
-	u16 fill;
+	u16 filter;
 	unsigned int last_qtail;
 };
+#define RPS_NO_FILTER 0xffff
 
 /*
  * The rps_dev_flow_table structure contains a table of flow mappings.
@@ -608,6 +613,11 @@
 
 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
 
+#ifdef CONFIG_RFS_ACCEL
+extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
+				u32 flow_id, u16 filter_id);
+#endif
+
 /* This structure contains an instance of an RX queue. */
 struct netdev_rx_queue {
 	struct rps_map __rcu		*rps_map;
@@ -643,6 +653,14 @@
     (nr_cpu_ids * sizeof(struct xps_map *)))
 #endif /* CONFIG_XPS */
 
+#define TC_MAX_QUEUE	16
+#define TC_BITMASK	15
+/* HW offloaded queuing disciplines txq count and offset maps */
+struct netdev_tc_txq {
+	u16 count;
+	u16 offset;
+};
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -753,6 +771,38 @@
  * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
  *			  struct nlattr *port[]);
  * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
+ * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
+ * 	Called to set up 'tc' traffic classes in the net device. This
+ * 	is always called from the stack with the rtnl lock held and netif tx
+ * 	queues stopped. This allows the netdevice to perform queue management
+ * 	safely.
+ *
+ *	RFS acceleration.
+ * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
+ *			    u16 rxq_index, u32 flow_id);
+ *	Set hardware filter for RFS.  rxq_index is the target queue index;
+ *	flow_id is a flow ID to be passed to rps_may_expire_flow() later.
+ *	Return the filter ID on success, or a negative error code.
+ *
+ *	Slave management functions (for bridge, bonding, etc.). The caller should
+ *	call netdev_set_master() to set dev->master properly.
+ * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
+ *	Called to make another netdev an underling.
+ *
+ * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
+ *	Called to release previously enslaved netdev.
+ *
+ *      Feature/offload setting functions.
+ * u32 (*ndo_fix_features)(struct net_device *dev, u32 features);
+ *	Adjusts the requested feature flags according to device-specific
+ *	constraints, and returns the resulting flags. Must not modify
+ *	the device state.
+ *
+ * int (*ndo_set_features)(struct net_device *dev, u32 features);
+ *	Called to update device configuration to new features. The passed
+ *	feature set might be less than what was returned by ndo_fix_features().
+ *	Must return >0 or -errno if it changed dev->features itself.
+ *
  */
 #define HAVE_NET_DEVICE_OPS
 struct net_device_ops {
@@ -811,6 +861,7 @@
 						   struct nlattr *port[]);
 	int			(*ndo_get_vf_port)(struct net_device *dev,
 						   int vf, struct sk_buff *skb);
+	int			(*ndo_setup_tc)(struct net_device *dev, u8 tc);
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	int			(*ndo_fcoe_enable)(struct net_device *dev);
 	int			(*ndo_fcoe_disable)(struct net_device *dev);
@@ -825,6 +876,20 @@
 	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
 						    u64 *wwn, int type);
 #endif
+#ifdef CONFIG_RFS_ACCEL
+	int			(*ndo_rx_flow_steer)(struct net_device *dev,
+						     const struct sk_buff *skb,
+						     u16 rxq_index,
+						     u32 flow_id);
+#endif
+	int			(*ndo_add_slave)(struct net_device *dev,
+						 struct net_device *slave_dev);
+	int			(*ndo_del_slave)(struct net_device *dev,
+						 struct net_device *slave_dev);
+	u32			(*ndo_fix_features)(struct net_device *dev,
+						    u32 features);
+	int			(*ndo_set_features)(struct net_device *dev,
+						    u32 features);
 };
 
 /*
@@ -876,8 +941,18 @@
 	struct list_head	napi_list;
 	struct list_head	unreg_list;
 
-	/* Net device features */
-	unsigned long		features;
+	/* currently active device features */
+	u32			features;
+	/* user-changeable features */
+	u32			hw_features;
+	/* user-requested features */
+	u32			wanted_features;
+	/* VLAN feature mask */
+	u32			vlan_features;
+
+	/* Net device feature bits; if you change something,
+	 * also update netdev_features_strings[] in ethtool.c */
+
 #define NETIF_F_SG		1	/* Scatter/gather IO. */
 #define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
 #define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopack. */
@@ -902,6 +977,7 @@
 #define NETIF_F_FCOE_MTU	(1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
 #define NETIF_F_NTUPLE		(1 << 27) /* N-tuple filters supported */
 #define NETIF_F_RXHASH		(1 << 28) /* Receive hashing offload */
+#define NETIF_F_RXCSUM		(1 << 29) /* Receive checksumming offload */
 
 	/* Segmentation offload features */
 #define NETIF_F_GSO_SHIFT	16
@@ -913,6 +989,12 @@
 #define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
 #define NETIF_F_FSO		(SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
 
+	/* Features valid for ethtool to change */
+	/* = all defined minus driver/device-class-related */
+#define NETIF_F_NEVER_CHANGE	(NETIF_F_HIGHDMA | NETIF_F_VLAN_CHALLENGED | \
+				  NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
+#define NETIF_F_ETHTOOL_BITS	(0x3f3fffff & ~NETIF_F_NEVER_CHANGE)
+
 	/* List of features with software fallbacks. */
 #define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | \
 				 NETIF_F_TSO6 | NETIF_F_UFO)
@@ -923,6 +1005,12 @@
 #define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
 #define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
 
+#define NETIF_F_ALL_TSO 	(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+#define NETIF_F_ALL_TX_OFFLOADS	(NETIF_F_ALL_CSUM | NETIF_F_SG | \
+				 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+				 NETIF_F_SCTP_CSUM | NETIF_F_FCOE_CRC)
+
 	/*
 	 * If one device supports one of these features, then enable them
 	 * for all in netdev_increment_features.
@@ -931,6 +1019,9 @@
 				 NETIF_F_SG | NETIF_F_HIGHDMA |		\
 				 NETIF_F_FRAGLIST)
 
+	/* changeable features with no special hardware requirements */
+#define NETIF_F_SOFT_FEATURES	(NETIF_F_GSO | NETIF_F_GRO)
+
 	/* Interface index. Unique device identifier	*/
 	int			ifindex;
 	int			iflink;
@@ -1039,6 +1130,13 @@
 
 	/* Number of RX queues currently active in device */
 	unsigned int		real_num_rx_queues;
+
+#ifdef CONFIG_RFS_ACCEL
+	/* CPU reverse-mapping for RX completion interrupts, indexed
+	 * by RX queue number.  Assigned by driver.  This must only be
+	 * set if the ndo_rx_flow_steer operation is defined. */
+	struct cpu_rmap		*rx_cpu_rmap;
+#endif
 #endif
 
 	rx_handler_func_t __rcu	*rx_handler;
@@ -1132,9 +1230,6 @@
 	/* rtnetlink link ops */
 	const struct rtnl_link_ops *rtnl_link_ops;
 
-	/* VLAN feature mask */
-	unsigned long vlan_features;
-
 	/* for setting kernel sock attribute on TCP connection setup */
 #define GSO_MAX_SIZE		65536
 	unsigned int		gso_max_size;
@@ -1143,6 +1238,9 @@
 	/* Data Center Bridging netlink ops */
 	const struct dcbnl_rtnl_ops *dcbnl_ops;
 #endif
+	u8 num_tc;
+	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+	u8 prio_tc_map[TC_BITMASK + 1];
 
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	/* max exchange id for FCoE LRO by ddp */
@@ -1153,12 +1251,66 @@
 
 	/* phy device may attach itself for hardware timestamping */
 	struct phy_device *phydev;
+
+	/* group the device belongs to */
+	int group;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
 #define	NETDEV_ALIGN		32
 
 static inline
+int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
+{
+	return dev->prio_tc_map[prio & TC_BITMASK];
+}
+
+static inline
+int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
+{
+	if (tc >= dev->num_tc)
+		return -EINVAL;
+
+	dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
+	return 0;
+}
+
+static inline
+void netdev_reset_tc(struct net_device *dev)
+{
+	dev->num_tc = 0;
+	memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
+	memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
+}
+
+static inline
+int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
+{
+	if (tc >= dev->num_tc)
+		return -EINVAL;
+
+	dev->tc_to_txq[tc].count = count;
+	dev->tc_to_txq[tc].offset = offset;
+	return 0;
+}
+
+static inline
+int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
+{
+	if (num_tc > TC_MAX_QUEUE)
+		return -EINVAL;
+
+	dev->num_tc = num_tc;
+	return 0;
+}
+
+static inline
+int netdev_get_num_tc(struct net_device *dev)
+{
+	return dev->num_tc;
+}
+
+static inline
 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
 					 unsigned int index)
 {
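A hedged sketch of a driver-side ndo_setup_tc() built on the helpers above: the real TX queues are split evenly across 'tc' classes and priority i is mapped to class i. The queue layout and priority policy are assumptions, not part of the patch:

#include <linux/netdevice.h>

static int my_setup_tc(struct net_device *dev, u8 tc)
{
	unsigned int qs_per_tc;
	u8 i;

	if (!tc) {
		netdev_reset_tc(dev);
		return 0;
	}
	if (tc > dev->real_num_tx_queues || netdev_set_num_tc(dev, tc))
		return -EINVAL;

	qs_per_tc = dev->real_num_tx_queues / tc;
	for (i = 0; i < tc; i++) {
		netdev_set_tc_queue(dev, i, qs_per_tc, i * qs_per_tc);
		netdev_set_prio_tc_map(dev, i, i);
	}
	return 0;
}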
@@ -1300,7 +1452,7 @@
 					 struct packet_type *,
 					 struct net_device *);
 	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
-						int features);
+						u32 features);
 	int			(*gso_send_check)(struct sk_buff *skb);
 	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
 					       struct sk_buff *skb);
@@ -1345,7 +1497,7 @@
 	struct net *net;
 
 	net = dev_net(dev);
-	lh = rcu_dereference(dev->dev_list.next);
+	lh = rcu_dereference(list_next_rcu(&dev->dev_list));
 	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 }
 
@@ -1355,6 +1507,13 @@
 		net_device_entry(net->dev_base_head.next);
 }
 
+static inline struct net_device *first_net_device_rcu(struct net *net)
+{
+	struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
+
+	return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
+}
+
 extern int 			netdev_boot_setup_check(struct net_device *dev);
 extern unsigned long		netdev_boot_base(const char *prefix, int unit);
 extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
@@ -1844,6 +2003,7 @@
 extern int		dev_change_net_namespace(struct net_device *,
 						 struct net *, const char *);
 extern int		dev_set_mtu(struct net_device *, int);
+extern void		dev_set_group(struct net_device *, int);
 extern int		dev_set_mac_address(struct net_device *,
 					    struct sockaddr *);
 extern int		dev_hard_start_xmit(struct sk_buff *skb,
@@ -2267,8 +2427,10 @@
 extern int		netdev_tstamp_prequeue;
 extern int		weight_p;
 extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
+extern int netdev_set_bond_master(struct net_device *dev,
+				  struct net_device *master);
 extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features);
 #ifdef CONFIG_BUG
 extern void netdev_rx_csum_fault(struct net_device *dev);
 #else
@@ -2295,22 +2457,26 @@
 
 extern void linkwatch_run_queue(void);
 
-unsigned long netdev_increment_features(unsigned long all, unsigned long one,
-					unsigned long mask);
-unsigned long netdev_fix_features(unsigned long features, const char *name);
+static inline u32 netdev_get_wanted_features(struct net_device *dev)
+{
+	return (dev->features & ~dev->hw_features) | dev->wanted_features;
+}
+u32 netdev_increment_features(u32 all, u32 one, u32 mask);
+u32 netdev_fix_features(struct net_device *dev, u32 features);
+void netdev_update_features(struct net_device *dev);
 
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 					struct net_device *dev);
 
-int netif_skb_features(struct sk_buff *skb);
+u32 netif_skb_features(struct sk_buff *skb);
 
-static inline int net_gso_ok(int features, int gso_type)
+static inline int net_gso_ok(u32 features, int gso_type)
 {
 	int feature = gso_type << NETIF_F_GSO_SHIFT;
 	return (features & feature) == feature;
 }
 
-static inline int skb_gso_ok(struct sk_buff *skb, int features)
+static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
 {
 	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
@@ -2328,15 +2494,9 @@
 	dev->gso_max_size = size;
 }
 
-extern int __skb_bond_should_drop(struct sk_buff *skb,
-				  struct net_device *master);
-
-static inline int skb_bond_should_drop(struct sk_buff *skb,
-				       struct net_device *master)
+static inline int netif_is_bond_slave(struct net_device *dev)
 {
-	if (master)
-		return __skb_bond_should_drop(skb, master);
-	return 0;
+	return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
 }
 
 extern struct pernet_operations __net_initdata loopback_net_ops;
@@ -2351,6 +2511,8 @@
 
 static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
 {
+	if (dev->hw_features & NETIF_F_RXCSUM)
+		return !!(dev->features & NETIF_F_RXCSUM);
 	if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
 		return 0;
 	return dev->ethtool_ops->get_rx_csum(dev);
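A hedged sketch of the two new feature hooks documented above, for a hypothetical driver whose TSO depends on scatter/gather and whose RX checksum engine is toggled via my_hw_set_rx_csum() (a placeholder, not a real function):

#include <linux/netdevice.h>

static void my_hw_set_rx_csum(struct net_device *dev, bool on)
{
	/* Illustrative placeholder for poking a device register. */
}

static u32 my_fix_features(struct net_device *dev, u32 features)
{
	/* On this (hypothetical) hardware TSO needs SG. */
	if (!(features & NETIF_F_SG))
		features &= ~NETIF_F_ALL_TSO;
	return features;
}

static int my_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM)
		my_hw_set_rx_csum(dev, !!(features & NETIF_F_RXCSUM));
	return 0;
}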
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 1893837..eeec00a 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -24,16 +24,20 @@
 #define NF_MAX_VERDICT NF_STOP
 
 /* we overload the higher bits for encoding auxiliary data such as the queue
- * number. Not nice, but better than additional function arguments. */
-#define NF_VERDICT_MASK 0x0000ffff
-#define NF_VERDICT_BITS 16
+ * number or errno values. Not nice, but better than additional function
+ * arguments. */
+#define NF_VERDICT_MASK 0x000000ff
 
+/* extra verdict flags have mask 0x0000ff00 */
+#define NF_VERDICT_FLAG_QUEUE_BYPASS	0x00008000
+
+/* queue number (NF_QUEUE) or errno (NF_DROP) */
 #define NF_VERDICT_QMASK 0xffff0000
 #define NF_VERDICT_QBITS 16
 
-#define NF_QUEUE_NR(x) ((((x) << NF_VERDICT_BITS) & NF_VERDICT_QMASK) | NF_QUEUE)
+#define NF_QUEUE_NR(x) ((((x) << 16) & NF_VERDICT_QMASK) | NF_QUEUE)
 
-#define NF_DROP_ERR(x) (((-x) << NF_VERDICT_BITS) | NF_DROP)
+#define NF_DROP_ERR(x) (((-x) << 16) | NF_DROP)
 
 /* only for userspace compatibility */
 #ifndef __KERNEL__
@@ -41,6 +45,9 @@
    <= 0x2000 is used for protocol-flags. */
 #define NFC_UNKNOWN 0x4000
 #define NFC_ALTERED 0x8000
+
+/* NF_VERDICT_BITS should be 8 now, but userspace might break if this changes */
+#define NF_VERDICT_BITS 16
 #endif
 
 enum nf_inet_hooks {
@@ -72,6 +79,10 @@
 
 #ifdef __KERNEL__
 #ifdef CONFIG_NETFILTER
+static inline int NF_DROP_GETERR(int verdict)
+{
+	return -(verdict >> NF_VERDICT_QBITS);
+}
 
 static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
 				   const union nf_inet_addr *a2)
@@ -267,7 +278,7 @@
 	int		route_key_size;
 };
 
-extern const struct nf_afinfo *nf_afinfo[NFPROTO_NUMPROTO];
+extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
 static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
 {
 	return rcu_dereference(nf_afinfo[family]);
@@ -357,9 +368,9 @@
 #endif /*CONFIG_NETFILTER*/
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
+extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu;
 extern void nf_ct_attach(struct sk_buff *, struct sk_buff *);
-extern void (*nf_ct_destroy)(struct nf_conntrack *);
+extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
 #else
 static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
 #endif
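A standalone userspace check of the verdict/errno packing changed above; the macros are copied locally for the demo and the arithmetic is worked by hand:

#include <stdio.h>

#define NF_DROP			0
#define NF_VERDICT_QBITS	16
#define NF_DROP_ERR(x)		(((-x) << 16) | NF_DROP)
#define NF_DROP_GETERR(v)	(-((v) >> NF_VERDICT_QBITS))

int main(void)
{
	int verdict = NF_DROP_ERR(-13);		/* e.g. -EACCES */

	/* 13 << 16 = 0x000d0000; shifting back and negating recovers -13 */
	printf("verdict=0x%08x errno=%d\n", verdict, NF_DROP_GETERR(verdict));
	return 0;
}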
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index 9d40eff..15e83bf 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -1,3 +1,5 @@
+header-y += ipset/
+
 header-y += nf_conntrack_common.h
 header-y += nf_conntrack_ftp.h
 header-y += nf_conntrack_sctp.h
@@ -9,6 +11,7 @@
 header-y += nfnetlink_log.h
 header-y += nfnetlink_queue.h
 header-y += x_tables.h
+header-y += xt_AUDIT.h
 header-y += xt_CHECKSUM.h
 header-y += xt_CLASSIFY.h
 header-y += xt_CONNMARK.h
@@ -34,6 +37,7 @@
 header-y += xt_conntrack.h
 header-y += xt_cpu.h
 header-y += xt_dccp.h
+header-y += xt_devgroup.h
 header-y += xt_dscp.h
 header-y += xt_esp.h
 header-y += xt_hashlimit.h
@@ -54,7 +58,9 @@
 header-y += xt_rateest.h
 header-y += xt_realm.h
 header-y += xt_recent.h
+header-y += xt_set.h
 header-y += xt_sctp.h
+header-y += xt_socket.h
 header-y += xt_state.h
 header-y += xt_statistic.h
 header-y += xt_string.h
diff --git a/include/linux/netfilter/ipset/Kbuild b/include/linux/netfilter/ipset/Kbuild
new file mode 100644
index 0000000..601fe71
--- /dev/null
+++ b/include/linux/netfilter/ipset/Kbuild
@@ -0,0 +1,4 @@
+header-y += ip_set.h
+header-y += ip_set_bitmap.h
+header-y += ip_set_hash.h
+header-y += ip_set_list.h
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
new file mode 100644
index 0000000..ec333d8
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -0,0 +1,452 @@
+#ifndef _IP_SET_H
+#define _IP_SET_H
+
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ *                         Patrick Schaaf <bof@bof.de>
+ *                         Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* The protocol version */
+#define IPSET_PROTOCOL		6
+
+/* The max length of strings including NUL: set and type identifiers */
+#define IPSET_MAXNAMELEN	32
+
+/* Message types and commands */
+enum ipset_cmd {
+	IPSET_CMD_NONE,
+	IPSET_CMD_PROTOCOL,	/* 1: Return protocol version */
+	IPSET_CMD_CREATE,	/* 2: Create a new (empty) set */
+	IPSET_CMD_DESTROY,	/* 3: Destroy a (empty) set */
+	IPSET_CMD_FLUSH,	/* 4: Remove all elements from a set */
+	IPSET_CMD_RENAME,	/* 5: Rename a set */
+	IPSET_CMD_SWAP,		/* 6: Swap two sets */
+	IPSET_CMD_LIST,		/* 7: List sets */
+	IPSET_CMD_SAVE,		/* 8: Save sets */
+	IPSET_CMD_ADD,		/* 9: Add an element to a set */
+	IPSET_CMD_DEL,		/* 10: Delete an element from a set */
+	IPSET_CMD_TEST,		/* 11: Test an element in a set */
+	IPSET_CMD_HEADER,	/* 12: Get set header data only */
+	IPSET_CMD_TYPE,		/* 13: Get set type */
+	IPSET_MSG_MAX,		/* Netlink message commands */
+
+	/* Commands in userspace: */
+	IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 14: Enter restore mode */
+	IPSET_CMD_HELP,		/* 15: Get help */
+	IPSET_CMD_VERSION,	/* 16: Get program version */
+	IPSET_CMD_QUIT,		/* 17: Quit from interactive mode */
+
+	IPSET_CMD_MAX,
+
+	IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 18: Commit buffered commands */
+};
+
+/* Attributes at command level */
+enum {
+	IPSET_ATTR_UNSPEC,
+	IPSET_ATTR_PROTOCOL,	/* 1: Protocol version */
+	IPSET_ATTR_SETNAME,	/* 2: Name of the set */
+	IPSET_ATTR_TYPENAME,	/* 3: Typename */
+	IPSET_ATTR_SETNAME2 = IPSET_ATTR_TYPENAME, /* Setname at rename/swap */
+	IPSET_ATTR_REVISION,	/* 4: Settype revision */
+	IPSET_ATTR_FAMILY,	/* 5: Settype family */
+	IPSET_ATTR_FLAGS,	/* 6: Flags at command level */
+	IPSET_ATTR_DATA,	/* 7: Nested attributes */
+	IPSET_ATTR_ADT,		/* 8: Multiple data containers */
+	IPSET_ATTR_LINENO,	/* 9: Restore lineno */
+	IPSET_ATTR_PROTOCOL_MIN, /* 10: Minimal supported version number */
+	IPSET_ATTR_REVISION_MIN	= IPSET_ATTR_PROTOCOL_MIN, /* type rev min */
+	__IPSET_ATTR_CMD_MAX,
+};
+#define IPSET_ATTR_CMD_MAX	(__IPSET_ATTR_CMD_MAX - 1)
+
+/* CADT specific attributes */
+enum {
+	IPSET_ATTR_IP = IPSET_ATTR_UNSPEC + 1,
+	IPSET_ATTR_IP_FROM = IPSET_ATTR_IP,
+	IPSET_ATTR_IP_TO,	/* 2 */
+	IPSET_ATTR_CIDR,	/* 3 */
+	IPSET_ATTR_PORT,	/* 4 */
+	IPSET_ATTR_PORT_FROM = IPSET_ATTR_PORT,
+	IPSET_ATTR_PORT_TO,	/* 5 */
+	IPSET_ATTR_TIMEOUT,	/* 6 */
+	IPSET_ATTR_PROTO,	/* 7 */
+	IPSET_ATTR_CADT_FLAGS,	/* 8 */
+	IPSET_ATTR_CADT_LINENO = IPSET_ATTR_LINENO,	/* 9 */
+	/* Reserve empty slots */
+	IPSET_ATTR_CADT_MAX = 16,
+	/* Create-only specific attributes */
+	IPSET_ATTR_GC,
+	IPSET_ATTR_HASHSIZE,
+	IPSET_ATTR_MAXELEM,
+	IPSET_ATTR_NETMASK,
+	IPSET_ATTR_PROBES,
+	IPSET_ATTR_RESIZE,
+	IPSET_ATTR_SIZE,
+	/* Kernel-only */
+	IPSET_ATTR_ELEMENTS,
+	IPSET_ATTR_REFERENCES,
+	IPSET_ATTR_MEMSIZE,
+
+	__IPSET_ATTR_CREATE_MAX,
+};
+#define IPSET_ATTR_CREATE_MAX	(__IPSET_ATTR_CREATE_MAX - 1)
+
+/* ADT specific attributes */
+enum {
+	IPSET_ATTR_ETHER = IPSET_ATTR_CADT_MAX + 1,
+	IPSET_ATTR_NAME,
+	IPSET_ATTR_NAMEREF,
+	IPSET_ATTR_IP2,
+	IPSET_ATTR_CIDR2,
+	__IPSET_ATTR_ADT_MAX,
+};
+#define IPSET_ATTR_ADT_MAX	(__IPSET_ATTR_ADT_MAX - 1)
+
+/* IP specific attributes */
+enum {
+	IPSET_ATTR_IPADDR_IPV4 = IPSET_ATTR_UNSPEC + 1,
+	IPSET_ATTR_IPADDR_IPV6,
+	__IPSET_ATTR_IPADDR_MAX,
+};
+#define IPSET_ATTR_IPADDR_MAX	(__IPSET_ATTR_IPADDR_MAX - 1)
+
+/* Error codes */
+enum ipset_errno {
+	IPSET_ERR_PRIVATE = 4096,
+	IPSET_ERR_PROTOCOL,
+	IPSET_ERR_FIND_TYPE,
+	IPSET_ERR_MAX_SETS,
+	IPSET_ERR_BUSY,
+	IPSET_ERR_EXIST_SETNAME2,
+	IPSET_ERR_TYPE_MISMATCH,
+	IPSET_ERR_EXIST,
+	IPSET_ERR_INVALID_CIDR,
+	IPSET_ERR_INVALID_NETMASK,
+	IPSET_ERR_INVALID_FAMILY,
+	IPSET_ERR_TIMEOUT,
+	IPSET_ERR_REFERENCED,
+	IPSET_ERR_IPADDR_IPV4,
+	IPSET_ERR_IPADDR_IPV6,
+
+	/* Type specific error codes */
+	IPSET_ERR_TYPE_SPECIFIC = 4352,
+};
+
+/* Flags at command level */
+enum ipset_cmd_flags {
+	IPSET_FLAG_BIT_EXIST	= 0,
+	IPSET_FLAG_EXIST	= (1 << IPSET_FLAG_BIT_EXIST),
+};
+
+/* Flags at CADT attribute level */
+enum ipset_cadt_flags {
+	IPSET_FLAG_BIT_BEFORE	= 0,
+	IPSET_FLAG_BEFORE	= (1 << IPSET_FLAG_BIT_BEFORE),
+};
+
+/* Commands with settype-specific attributes */
+enum ipset_adt {
+	IPSET_ADD,
+	IPSET_DEL,
+	IPSET_TEST,
+	IPSET_ADT_MAX,
+	IPSET_CREATE = IPSET_ADT_MAX,
+	IPSET_CADT_MAX,
+};
+
+#ifdef __KERNEL__
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/vmalloc.h>
+#include <net/netlink.h>
+
+/* Sets are identified by an index in kernel space. Tweak with ip_set_id_t
+ * and IPSET_INVALID_ID if you want to increase the max number of sets.
+ */
+typedef u16 ip_set_id_t;
+
+#define IPSET_INVALID_ID		65535
+
+enum ip_set_dim {
+	IPSET_DIM_ZERO = 0,
+	IPSET_DIM_ONE,
+	IPSET_DIM_TWO,
+	IPSET_DIM_THREE,
+	/* Max dimension in elements.
+	 * If changed, new revision of iptables match/target is required.
+	 */
+	IPSET_DIM_MAX = 6,
+};
+
+/* Option flags for kernel operations */
+enum ip_set_kopt {
+	IPSET_INV_MATCH = (1 << IPSET_DIM_ZERO),
+	IPSET_DIM_ONE_SRC = (1 << IPSET_DIM_ONE),
+	IPSET_DIM_TWO_SRC = (1 << IPSET_DIM_TWO),
+	IPSET_DIM_THREE_SRC = (1 << IPSET_DIM_THREE),
+};
+
+/* Set features */
+enum ip_set_feature {
+	IPSET_TYPE_IP_FLAG = 0,
+	IPSET_TYPE_IP = (1 << IPSET_TYPE_IP_FLAG),
+	IPSET_TYPE_PORT_FLAG = 1,
+	IPSET_TYPE_PORT = (1 << IPSET_TYPE_PORT_FLAG),
+	IPSET_TYPE_MAC_FLAG = 2,
+	IPSET_TYPE_MAC = (1 << IPSET_TYPE_MAC_FLAG),
+	IPSET_TYPE_IP2_FLAG = 3,
+	IPSET_TYPE_IP2 = (1 << IPSET_TYPE_IP2_FLAG),
+	IPSET_TYPE_NAME_FLAG = 4,
+	IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG),
+	/* Strictly speaking not a feature, but a flag for dumping:
+	 * this settype must be dumped last */
+	IPSET_DUMP_LAST_FLAG = 7,
+	IPSET_DUMP_LAST = (1 << IPSET_DUMP_LAST_FLAG),
+};
+
+struct ip_set;
+
+typedef int (*ipset_adtfn)(struct ip_set *set, void *value, u32 timeout);
+
+/* Set type, variant-specific part */
+struct ip_set_type_variant {
+	/* Kernelspace: test/add/del entries
+	 *		returns negative error code,
+	 *			zero for no match/success to add/delete
+	 *			positive for matching element */
+	int (*kadt)(struct ip_set *set, const struct sk_buff * skb,
+		    enum ipset_adt adt, u8 pf, u8 dim, u8 flags);
+
+	/* Userspace: test/add/del entries
+	 *		returns negative error code,
+	 *			zero for no match/success to add/delete
+	 *			positive for matching element */
+	int (*uadt)(struct ip_set *set, struct nlattr *tb[],
+		    enum ipset_adt adt, u32 *lineno, u32 flags);
+
+	/* Low level add/del/test functions */
+	ipset_adtfn adt[IPSET_ADT_MAX];
+
+	/* When adding entries and set is full, try to resize the set */
+	int (*resize)(struct ip_set *set, bool retried);
+	/* Destroy the set */
+	void (*destroy)(struct ip_set *set);
+	/* Flush the elements */
+	void (*flush)(struct ip_set *set);
+	/* Expire entries before listing */
+	void (*expire)(struct ip_set *set);
+	/* List set header data */
+	int (*head)(struct ip_set *set, struct sk_buff *skb);
+	/* List elements */
+	int (*list)(const struct ip_set *set, struct sk_buff *skb,
+		    struct netlink_callback *cb);
+
+	/* Return true if "b" set is the same as "a"
+	 * according to the create set parameters */
+	bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+};
+
+/* The core set type structure */
+struct ip_set_type {
+	struct list_head list;
+
+	/* Typename */
+	char name[IPSET_MAXNAMELEN];
+	/* Protocol version */
+	u8 protocol;
+	/* Set features to control swapping */
+	u8 features;
+	/* Set type dimension */
+	u8 dimension;
+	/* Supported family: may be AF_UNSPEC for both AF_INET/AF_INET6 */
+	u8 family;
+	/* Type revision */
+	u8 revision;
+
+	/* Create set */
+	int (*create)(struct ip_set *set, struct nlattr *tb[], u32 flags);
+
+	/* Attribute policies */
+	const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1];
+	const struct nla_policy adt_policy[IPSET_ATTR_ADT_MAX + 1];
+
+	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
+	struct module *me;
+};
+
+/* register and unregister set type */
+extern int ip_set_type_register(struct ip_set_type *set_type);
+extern void ip_set_type_unregister(struct ip_set_type *set_type);
+
+/* A generic IP set */
+struct ip_set {
+	/* The name of the set */
+	char name[IPSET_MAXNAMELEN];
+	/* Lock protecting the set data */
+	rwlock_t lock;
+	/* References to the set */
+	atomic_t ref;
+	/* The core set type */
+	struct ip_set_type *type;
+	/* The type variant doing the real job */
+	const struct ip_set_type_variant *variant;
+	/* The actual INET family of the set */
+	u8 family;
+	/* The type specific data */
+	void *data;
+};
+
+/* register and unregister set references */
+extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set);
+extern void ip_set_put_byindex(ip_set_id_t index);
+extern const char * ip_set_name_byindex(ip_set_id_t index);
+extern ip_set_id_t ip_set_nfnl_get(const char *name);
+extern ip_set_id_t ip_set_nfnl_get_byindex(ip_set_id_t index);
+extern void ip_set_nfnl_put(ip_set_id_t index);
+
+/* API for iptables set match, and SET target */
+extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb,
+		      u8 family, u8 dim, u8 flags);
+extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb,
+		      u8 family, u8 dim, u8 flags);
+extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb,
+		       u8 family, u8 dim, u8 flags);
+
+/* Utility functions */
+extern void * ip_set_alloc(size_t size);
+extern void ip_set_free(void *members);
+extern int ip_set_get_ipaddr4(struct nlattr *nla,  __be32 *ipaddr);
+extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
+
+static inline int
+ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
+{
+	__be32 ip;
+	int ret = ip_set_get_ipaddr4(nla, &ip);
+	
+	if (ret)
+		return ret;
+	*ipaddr = ntohl(ip);
+	return 0;
+}
+
+/* Ignore IPSET_ERR_EXIST errors if asked to do so? */
+static inline bool
+ip_set_eexist(int ret, u32 flags)
+{
+	return ret == -IPSET_ERR_EXIST && (flags & IPSET_FLAG_EXIST);
+}
+
+/* Check the NLA_F_NET_BYTEORDER flag */
+static inline bool
+ip_set_attr_netorder(struct nlattr *tb[], int type)
+{
+	return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
+}
+
+static inline bool
+ip_set_optattr_netorder(struct nlattr *tb[], int type)
+{
+	return !tb[type] || (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
+}
+
+/* Useful converters */
+static inline u32
+ip_set_get_h32(const struct nlattr *attr)
+{
+	return ntohl(nla_get_be32(attr));
+}
+
+static inline u16
+ip_set_get_h16(const struct nlattr *attr)
+{
+	return ntohs(nla_get_be16(attr));
+}
+
+#define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED)
+#define ipset_nest_end(skb, start)  nla_nest_end(skb, start)
+
+#define NLA_PUT_IPADDR4(skb, type, ipaddr)			\
+do {								\
+	struct nlattr *__nested = ipset_nest_start(skb, type);	\
+								\
+	if (!__nested)						\
+		goto nla_put_failure;				\
+	NLA_PUT_NET32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);	\
+	ipset_nest_end(skb, __nested);				\
+} while (0)
+
+#define NLA_PUT_IPADDR6(skb, type, ipaddrptr)			\
+do {								\
+	struct nlattr *__nested = ipset_nest_start(skb, type);	\
+								\
+	if (!__nested)						\
+		goto nla_put_failure;				\
+	NLA_PUT(skb, IPSET_ATTR_IPADDR_IPV6,			\
+		sizeof(struct in6_addr), ipaddrptr);		\
+	ipset_nest_end(skb, __nested);				\
+} while (0)
+
+/* Get address from skbuff */
+static inline __be32
+ip4addr(const struct sk_buff *skb, bool src)
+{
+	return src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
+}
+
+static inline void
+ip4addrptr(const struct sk_buff *skb, bool src, __be32 *addr)
+{
+	*addr = src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
+}
+
+static inline void
+ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
+{
+	memcpy(addr, src ? &ipv6_hdr(skb)->saddr : &ipv6_hdr(skb)->daddr,
+	       sizeof(*addr));
+}
+
+/* Calculate the bytes required to store the inclusive range of a-b */
+static inline int
+bitmap_bytes(u32 a, u32 b)
+{
+	return 4 * ((((b - a + 8) / 8) + 3) / 4);
+}
+
+/* Interface to iptables/ip6tables */
+
+#define SO_IP_SET		83
+
+union ip_set_name_index {
+	char name[IPSET_MAXNAMELEN];
+	ip_set_id_t index;
+};
+
+#define IP_SET_OP_GET_BYNAME	0x00000006	/* Get set index by name */
+struct ip_set_req_get_set {
+	unsigned op;
+	unsigned version;
+	union ip_set_name_index set;
+};
+
+#define IP_SET_OP_GET_BYINDEX	0x00000007	/* Get set name by index */
+/* Uses ip_set_req_get_set */
+
+#define IP_SET_OP_VERSION	0x00000100	/* Ask kernel version */
+struct ip_set_req_version {
+	unsigned op;
+	unsigned version;
+};
+
+#endif	/* __KERNEL__ */
+
+#endif /*_IP_SET_H */
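A standalone sanity check of the bitmap_bytes() arithmetic from the new header: bytes needed to cover the inclusive bit range a..b, rounded up to a multiple of 4. The expected values in the comment are worked by hand:

#include <stdio.h>
#include <stdint.h>

static int bitmap_bytes(uint32_t a, uint32_t b)
{
	return 4 * ((((b - a + 8) / 8) + 3) / 4);
}

int main(void)
{
	/* 0..31 is 32 bits -> 4 bytes; 0..65535 is 65536 bits -> 8192 bytes */
	printf("%d %d\n", bitmap_bytes(0, 31), bitmap_bytes(0, 65535));
	return 0;
}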
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
new file mode 100644
index 0000000..ec9d9be
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -0,0 +1,1074 @@
+#ifndef _IP_SET_AHASH_H
+#define _IP_SET_AHASH_H
+
+#include <linux/rcupdate.h>
+#include <linux/jhash.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+/* Hashing which uses arrays to resolve clashes. The hash table is resized
+ * (doubled) when searching becomes too long.
+ * Internally jhash is used with the assumption that the size of the
+ * stored data is a multiple of sizeof(u32). If storage supports timeout,
+ * the timeout field must be the last one in the data structure - that field
+ * is ignored when computing the hash key.
+ *
+ * Readers and resizing
+ *
+ * Resizing can be triggered by userspace command only, and those
+ * are serialized by the nfnl mutex. During resizing the set is
+ * read-locked, so the only possible concurrent operations are
+ * the kernel side readers. Those must be protected by proper RCU locking.
+ */
+
+/* Number of elements to store in an initial array block */
+#define AHASH_INIT_SIZE			4
+/* Max number of elements to store in an array block */
+#define AHASH_MAX_SIZE			(3*4)
+
+/* A hash bucket */
+struct hbucket {
+	void *value;		/* the array of the values */
+	u8 size;		/* size of the array */
+	u8 pos;			/* position of the first free entry */
+};
+
+/* The hash table: the table size stored here in order to make resizing easy */
+struct htable {
+	u8 htable_bits;		/* size of hash table == 2^htable_bits */
+	struct hbucket bucket[0]; /* hashtable buckets */
+};
+
+#define hbucket(h, i)		&((h)->bucket[i])
+
+/* Book-keeping of the prefixes added to the set */
+struct ip_set_hash_nets {
+	u8 cidr;		/* the different cidr values in the set */
+	u32 nets;		/* number of elements per cidr */
+};
+
+/* The generic ip_set hash structure */
+struct ip_set_hash {
+	struct htable *table;	/* the hash table */
+	u32 maxelem;		/* max elements in the hash */
+	u32 elements;		/* current element (vs timeout) */
+	u32 initval;		/* random jhash init value */
+	u32 timeout;		/* timeout value, if enabled */
+	struct timer_list gc;	/* garbage collection when timeout enabled */
+#ifdef IP_SET_HASH_WITH_NETMASK
+	u8 netmask;		/* netmask value for subnets to store */
+#endif
+#ifdef IP_SET_HASH_WITH_NETS
+	struct ip_set_hash_nets nets[0]; /* book-keeping of prefixes */
+#endif
+};
+
+/* Compute htable_bits from the user input parameter hashsize */
+static u8
+htable_bits(u32 hashsize)
+{
+	/* Assume that hashsize == 2^htable_bits */
+	u8 bits = fls(hashsize - 1);
+	if (jhash_size(bits) != hashsize)
+		/* Round up to the first 2^n value */
+		bits = fls(hashsize);
+
+	return bits;
+}
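A standalone userspace replica of the htable_bits() rounding above, with fls() reimplemented for the demo: a hashsize that is not a power of two is rounded up to the next one before being expressed as a bit count:

#include <stdio.h>

#define jhash_size(n)	((unsigned int)1 << (n))

static int fls(unsigned int x)	/* kernel semantics: fls(0) == 0 */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static unsigned char htable_bits(unsigned int hashsize)
{
	unsigned char bits = fls(hashsize - 1);

	if (jhash_size(bits) != hashsize)
		bits = fls(hashsize);	/* round up to the next 2^n */
	return bits;
}

int main(void)
{
	/* 1024 -> 10, 1000 -> 10 (2^10 = 1024), 1025 -> 11 (2^11 = 2048) */
	printf("%d %d %d\n", htable_bits(1024), htable_bits(1000), htable_bits(1025));
	return 0;
}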
+
+#ifdef IP_SET_HASH_WITH_NETS
+
+#define SET_HOST_MASK(family)	(family == AF_INET ? 32 : 128)
+
+/* Network cidr size bookkeeping when the hash stores different-sized
+ * networks */
+static void
+add_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask)
+{
+	u8 i;
+
+	++h->nets[cidr-1].nets;
+
+	pr_debug("add_cidr added %u: %u\n", cidr, h->nets[cidr-1].nets);
+
+	if (h->nets[cidr-1].nets > 1)
+		return;
+
+	/* New cidr size */
+	for (i = 0; i < host_mask && h->nets[i].cidr; i++) {
+		/* Add in increasing prefix order, so larger cidr first */
+		if (h->nets[i].cidr < cidr)
+			swap(h->nets[i].cidr, cidr);
+	}
+	if (i < host_mask)
+		h->nets[i].cidr = cidr;
+}
+
+static void
+del_cidr(struct ip_set_hash *h, u8 cidr, u8 host_mask)
+{
+	u8 i;
+
+	--h->nets[cidr-1].nets;
+
+	pr_debug("del_cidr deleted %u: %u\n", cidr, h->nets[cidr-1].nets);
+
+	if (h->nets[cidr-1].nets != 0)
+		return;
+
+	/* All entries with this cidr size deleted, so cleanup h->cidr[] */
+	for (i = 0; i < host_mask - 1 && h->nets[i].cidr; i++) {
+		if (h->nets[i].cidr == cidr)
+			h->nets[i].cidr = cidr = h->nets[i+1].cidr;
+	}
+	h->nets[i - 1].cidr = 0;
+}
+#endif
+
+/* Destroy the hashtable part of the set */
+static void
+ahash_destroy(struct htable *t)
+{
+	struct hbucket *n;
+	u32 i;
+
+	for (i = 0; i < jhash_size(t->htable_bits); i++) {
+		n = hbucket(t, i);
+		if (n->size)
+			/* FIXME: use slab cache */
+			kfree(n->value);
+	}
+
+	ip_set_free(t);
+}
+
+/* Calculate the actual memory size of the set data */
+static size_t
+ahash_memsize(const struct ip_set_hash *h, size_t dsize, u8 host_mask)
+{
+	u32 i;
+	struct htable *t = h->table;
+	size_t memsize = sizeof(*h)
+			 + sizeof(*t)
+#ifdef IP_SET_HASH_WITH_NETS
+			 + sizeof(struct ip_set_hash_nets) * host_mask
+#endif
+			 + jhash_size(t->htable_bits) * sizeof(struct hbucket);
+
+	for (i = 0; i < jhash_size(t->htable_bits); i++)
+			memsize += t->bucket[i].size * dsize;
+
+	return memsize;
+}
+
+/* Flush a hash type of set: destroy all elements */
+static void
+ip_set_hash_flush(struct ip_set *set)
+{
+	struct ip_set_hash *h = set->data;
+	struct htable *t = h->table;
+	struct hbucket *n;
+	u32 i;
+
+	for (i = 0; i < jhash_size(t->htable_bits); i++) {
+		n = hbucket(t, i);
+		if (n->size) {
+			n->size = n->pos = 0;
+			/* FIXME: use slab cache */
+			kfree(n->value);
+		}
+	}
+#ifdef IP_SET_HASH_WITH_NETS
+	memset(h->nets, 0, sizeof(struct ip_set_hash_nets)
+			   * SET_HOST_MASK(set->family));
+#endif
+	h->elements = 0;
+}
+
+/* Destroy a hash type of set */
+static void
+ip_set_hash_destroy(struct ip_set *set)
+{
+	struct ip_set_hash *h = set->data;
+
+	if (with_timeout(h->timeout))
+		del_timer_sync(&h->gc);
+
+	ahash_destroy(h->table);
+	kfree(h);
+
+	set->data = NULL;
+}
+
+#define HKEY(data, initval, htable_bits)				 \
+(jhash2((u32 *)(data), sizeof(struct type_pf_elem)/sizeof(u32), initval) \
+	& jhash_mask(htable_bits))
+
+#endif /* _IP_SET_AHASH_H */
+
+#define CONCAT(a, b, c)		a##b##c
+#define TOKEN(a, b, c)		CONCAT(a, b, c)
+
+/* Type/family dependent function prototypes */
+
+#define type_pf_data_equal	TOKEN(TYPE, PF, _data_equal)
+#define type_pf_data_isnull	TOKEN(TYPE, PF, _data_isnull)
+#define type_pf_data_copy	TOKEN(TYPE, PF, _data_copy)
+#define type_pf_data_zero_out	TOKEN(TYPE, PF, _data_zero_out)
+#define type_pf_data_netmask	TOKEN(TYPE, PF, _data_netmask)
+#define type_pf_data_list	TOKEN(TYPE, PF, _data_list)
+#define type_pf_data_tlist	TOKEN(TYPE, PF, _data_tlist)
+
+#define type_pf_elem		TOKEN(TYPE, PF, _elem)
+#define type_pf_telem		TOKEN(TYPE, PF, _telem)
+#define type_pf_data_timeout	TOKEN(TYPE, PF, _data_timeout)
+#define type_pf_data_expired	TOKEN(TYPE, PF, _data_expired)
+#define type_pf_data_timeout_set TOKEN(TYPE, PF, _data_timeout_set)
+
+#define type_pf_elem_add	TOKEN(TYPE, PF, _elem_add)
+#define type_pf_add		TOKEN(TYPE, PF, _add)
+#define type_pf_del		TOKEN(TYPE, PF, _del)
+#define type_pf_test_cidrs	TOKEN(TYPE, PF, _test_cidrs)
+#define type_pf_test		TOKEN(TYPE, PF, _test)
+
+#define type_pf_elem_tadd	TOKEN(TYPE, PF, _elem_tadd)
+#define type_pf_del_telem	TOKEN(TYPE, PF, _ahash_del_telem)
+#define type_pf_expire		TOKEN(TYPE, PF, _expire)
+#define type_pf_tadd		TOKEN(TYPE, PF, _tadd)
+#define type_pf_tdel		TOKEN(TYPE, PF, _tdel)
+#define type_pf_ttest_cidrs	TOKEN(TYPE, PF, _ahash_ttest_cidrs)
+#define type_pf_ttest		TOKEN(TYPE, PF, _ahash_ttest)
+
+#define type_pf_resize		TOKEN(TYPE, PF, _resize)
+#define type_pf_tresize		TOKEN(TYPE, PF, _tresize)
+#define type_pf_flush		ip_set_hash_flush
+#define type_pf_destroy		ip_set_hash_destroy
+#define type_pf_head		TOKEN(TYPE, PF, _head)
+#define type_pf_list		TOKEN(TYPE, PF, _list)
+#define type_pf_tlist		TOKEN(TYPE, PF, _tlist)
+#define type_pf_same_set	TOKEN(TYPE, PF, _same_set)
+#define type_pf_kadt		TOKEN(TYPE, PF, _kadt)
+#define type_pf_uadt		TOKEN(TYPE, PF, _uadt)
+#define type_pf_gc		TOKEN(TYPE, PF, _gc)
+#define type_pf_gc_init		TOKEN(TYPE, PF, _gc_init)
+#define type_pf_variant		TOKEN(TYPE, PF, _variant)
+#define type_pf_tvariant	TOKEN(TYPE, PF, _tvariant)
+
+/* Flavour without timeout */
+
+/* Get the ith element from the array block n */
+#define ahash_data(n, i)	\
+	((struct type_pf_elem *)((n)->value) + (i))
+
+/* Add an element to the hash table when resizing the set:
+ * the internal counters are not updated here. */
+static int
+type_pf_elem_add(struct hbucket *n, const struct type_pf_elem *value)
+{
+	if (n->pos >= n->size) {
+		void *tmp;
+
+		if (n->size >= AHASH_MAX_SIZE)
+			/* Trigger rehashing */
+			return -EAGAIN;
+
+		tmp = kzalloc((n->size + AHASH_INIT_SIZE)
+			      * sizeof(struct type_pf_elem),
+			      GFP_ATOMIC);
+		if (!tmp)
+			return -ENOMEM;
+		if (n->size) {
+			memcpy(tmp, n->value,
+			       sizeof(struct type_pf_elem) * n->size);
+			kfree(n->value);
+		}
+		n->value = tmp;
+		n->size += AHASH_INIT_SIZE;
+	}
+	type_pf_data_copy(ahash_data(n, n->pos++), value);
+	return 0;
+}
+
+/* Resize a hash: create a new hash table with doubling the hashsize
+ * and inserting the elements to it. Repeat until we succeed or
+ * fail due to memory pressures. */
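+/* The new table is populated under read_lock_bh() of the set, published
+ * with rcu_assign_pointer() and the old table is freed only after
+ * synchronize_rcu_bh(). */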
+static int
+type_pf_resize(struct ip_set *set, bool retried)
+{
+	struct ip_set_hash *h = set->data;
+	struct htable *t, *orig = h->table;
+	u8 htable_bits = orig->htable_bits;
+	const struct type_pf_elem *data;
+	struct hbucket *n, *m;
+	u32 i, j;
+	int ret;
+
+retry:
+	ret = 0;
+	htable_bits++;
+	pr_debug("attempt to resize set %s from %u to %u, t %p\n",
+		 set->name, orig->htable_bits, htable_bits, orig);
+	if (!htable_bits)
+		/* In case we have plenty of memory :-) */
+		return -IPSET_ERR_HASH_FULL;
+	t = ip_set_alloc(sizeof(*t)
+			 + jhash_size(htable_bits) * sizeof(struct hbucket));
+	if (!t)
+		return -ENOMEM;
+	t->htable_bits = htable_bits;
+
+	read_lock_bh(&set->lock);
+	for (i = 0; i < jhash_size(orig->htable_bits); i++) {
+		n = hbucket(orig, i);
+		for (j = 0; j < n->pos; j++) {
+			data = ahash_data(n, j);
+			m = hbucket(t, HKEY(data, h->initval, htable_bits));
+			ret = type_pf_elem_add(m, data);
+			if (ret < 0) {
+				read_unlock_bh(&set->lock);
+				ahash_destroy(t);
+				if (ret == -EAGAIN)
+					goto retry;
+				return ret;
+			}
+		}
+	}
+
+	rcu_assign_pointer(h->table, t);
+	read_unlock_bh(&set->lock);
+
+	/* Give time to other readers of the set */
+	synchronize_rcu_bh();
+
+	pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
+		 orig->htable_bits, orig, t->htable_bits, t);
+	ahash_destroy(orig);
+
+	return 0;
+}
+
+/* Add an element to a hash and update the internal counters when succeeded,
+ * otherwise report the proper error code. */
+static int
+type_pf_add(struct ip_set *set, void *value, u32 timeout)
+{
+	struct ip_set_hash *h = set->data;
+	struct htable *t;
+	const struct type_pf_elem *d = value;
+	struct hbucket *n;
+	int i, ret = 0;
+	u32 key;
+
+	if (h->elements >= h->maxelem)
+		return -IPSET_ERR_HASH_FULL;
+
+	rcu_read_lock_bh();
+	t = rcu_dereference_bh(h->table);
+	key = HKEY(value, h->initval, t->htable_bits);
+	n = hbucket(t, key);
+	for (i = 0; i < n->pos; i++)
+		if (type_pf_data_equal(ahash_data(n, i), d)) {
+			ret = -IPSET_ERR_EXIST;
+			goto out;
+		}
+
+	ret = type_pf_elem_add(n, value);
+	if (ret != 0)
+		goto out;
+
+#ifdef IP_SET_HASH_WITH_NETS
+	add_cidr(h, d->cidr, HOST_MASK);
+#endif
+	h->elements++;
+out:
+	rcu_read_unlock_bh();
+	return ret;
+}
+
+/* Delete an element from the hash: swap it with the last element
+ * and free up space if possible.
+ */
+static int
+type_pf_del(struct ip_set *set, void *value, u32 timeout)
+{
+	struct ip_set_hash *h = set->data;
+	struct htable *t = h->table;
+	const struct type_pf_elem *d = value;
+	struct hbucket *n;
+	int i;
+	struct type_pf_elem *data;
+	u32 key;
+
+	key = HKEY(value, h->initval, t->htable_bits);
+	n = hbucket(t, key);
+	for (i = 0; i < n->pos; i++) {
+		data = ahash_data(n, i);
+		if (!type_pf_data_equal(data, d))
+			continue;
+		if (i != n->pos - 1)
+			/* Not last one */
+			type_pf_data_copy(data, ahash_data(n, n->pos - 1));
+
+		n->pos--;
+		h->elements--;
+#ifdef IP_SET_HASH_WITH_NETS
+		del_cidr(h, d->cidr, HOST_MASK);
+#endif
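+		/* Shrink the bucket if enough slots were freed up */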
+		if (n->pos + AHASH_INIT_SIZE < n->size) {
+			void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+					    * sizeof(struct type_pf_elem),
+					    GFP_ATOMIC);
+			if (!tmp)
+				return 0;
+			n->size -= AHASH_INIT_SIZE;
+			memcpy(tmp, n->value,
+			       n->size * sizeof(struct type_pf_elem));
+			kfree(n->value);
+			n->value = tmp;
+		}
+		return 0;
+	}
+
+	return -IPSET_ERR_EXIST;
+}
+
+#ifdef IP_SET_HASH_WITH_NETS
+
+/* Special test function which takes into account the different network
+ * sizes added to the set */
+static int
+type_pf_test_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
+{
+	struct ip_set_hash *h = set->data;
+	struct htable *t = h->table;
+	struct hbucket *n;
+	const struct type_pf_elem *data;
+	int i, j = 0;
+	u32 key;
+	u8 host_mask = SET_HOST_MASK(set->family);
+
+	pr_debug("test by nets\n");
+	for (; j < host_mask && h->nets[j].cidr; j++) {
+		type_pf_data_netmask(d, h->nets[j].cidr);
+		key = HKEY(d, h->initval, t->htable_bits);
+		n = hbucket(t, key);
+		for (i = 0; i < n->pos; i++) {
+			data = ahash_data(n, i);
+			if (type_pf_data_equal(data, d))
+				return 1;
+		}
+	}
+	return 0;
+}
+#endif
+
+/* Test whether the element is added to the set */
+static int
+type_pf_test(struct ip_set *set, void *value, u32 timeout)
+{
+	struct ip_set_hash *h = set->data;
+	struct htable *t = h->table;
+	struct type_pf_elem *d = value;
+	struct hbucket *n;
+	const struct type_pf_elem *data;
+	int i;
+	u32 key;
+
+#ifdef IP_SET_HASH_WITH_NETS
+	/* If we test an IP address and not a network address,
+	 * try all possible network sizes */
+	if (d->cidr == SET_HOST_MASK(set->family))
+		return type_pf_test_cidrs(set, d, timeout);
+#endif
+
+	key = HKEY(d, h->initval, t->htable_bits);
+	n = hbucket(t, key);
+	for (i = 0; i < n->pos; i++) {
+		data = ahash_data(n, i);
+		if (type_pf_data_equal(data, d))
+			return 1;
+	}
+	return 0;
+}
+
+/* Reply to a HEADER request: fill out the header part of the set */
+static int
+type_pf_head(struct ip_set *set, struct sk_buff *skb)
+{
+	const struct ip_set_hash *h = set->data;
+	struct nlattr *nested;
+	size_t memsize;
+
+	read_lock_bh(&set->lock);
+	memsize = ahash_memsize(h, with_timeout(h->timeout)
+					? sizeof(struct type_pf_telem)
+					: sizeof(struct type_pf_elem),
+				set->family == AF_INET ? 32 : 128);
+	read_unlock_bh(&set->lock);
+
+	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+	if (!nested)
+		goto nla_put_failure;
+	NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE,
+		      htonl(jhash_size(h->table->htable_bits)));
+	NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem));
+#ifdef IP_SET_HASH_WITH_NETMASK
+	if (h->netmask != HOST_MASK)
+		NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask);
+#endif
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize));
+	if (with_timeout(h->timeout))
+		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout));
+	ipset_nest_end(skb, nested);
+
+	return 0;
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+/* Reply to a LIST/SAVE request: dump the elements of the specified set */
+static int
+type_pf_list(const struct ip_set *set,
+	     struct sk_buff *skb, struct netlink_callback *cb)
+{
+	const struct ip_set_hash *h = set->data;
+	const struct htable *t = h->table;
+	struct nlattr *atd, *nested;
+	const struct hbucket *n;
+	const struct type_pf_elem *data;
+	u32 first = cb->args[2];
+	/* We assume that one hash bucket fits into one page */
+	void *incomplete;
+	int i;
+
+	atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+	if (!atd)
+		return -EMSGSIZE;
+	pr_debug("list hash set %s\n", set->name);
+	for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) {
+		incomplete = skb_tail_pointer(skb);
+		n = hbucket(t, cb->args[2]);
+		pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n);
+		for (i = 0; i < n->pos; i++) {
+			data = ahash_data(n, i);
+			pr_debug("list hash %lu hbucket %p i %u, data %p\n",
+				 cb->args[2], n, i, data);
+			nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+			if (!nested) {
+				if (cb->args[2] == first) {
+					nla_nest_cancel(skb, atd);
+					return -EMSGSIZE;
+				} else
+					goto nla_put_failure;
+			}
+			if (type_pf_data_list(skb, data))
+				goto nla_put_failure;
+			ipset_nest_end(skb, nested);
+		}
+	}
+	ipset_nest_end(skb, atd);
+	/* Set listing finished */
+	cb->args[2] = 0;
+
+	return 0;
+
+nla_put_failure:
+	nlmsg_trim(skb, incomplete);
+	ipset_nest_end(skb, atd);
+	if (unlikely(first == cb->args[2])) {
+		pr_warning("Can't list set %s: one bucket does not fit into "
+			   "a message. Please report it!\n", set->name);
+		cb->args[2] = 0;
+		return -EMSGSIZE;
+	}
+	return 0;
+}
+
+static int
+type_pf_kadt(struct ip_set *set, const struct sk_buff *skb,
+	     enum ipset_adt adt, u8 pf, u8 dim, u8 flags);
+static int
+type_pf_uadt(struct ip_set *set, struct nlattr *tb[],
+	     enum ipset_adt adt, u32 *lineno, u32 flags);
+
+static const struct ip_set_type_variant type_pf_variant = {
+	.kadt	= type_pf_kadt,
+	.uadt	= type_pf_uadt,
+	.adt	= {
+		[IPSET_ADD] = type_pf_add,
+		[IPSET_DEL] = type_pf_del,
+		[IPSET_TEST] = type_pf_test,
+	},
+	.destroy = type_pf_destroy,
+	.flush	= type_pf_flush,
+	.head	= type_pf_head,
+	.list	= type_pf_list,
+	.resize	= type_pf_resize,
+	.same_set = type_pf_same_set,
+};
+
+/* Flavour with timeout support */
+
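+/* Get the ith element from the array block n (variant with timeout data) */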
+#define ahash_tdata(n, i) \
+	(struct type_pf_elem *)((struct type_pf_telem *)((n)->value) + (i))
+
+static inline u32
+type_pf_data_timeout(const struct type_pf_elem *data)
+{
+	const struct type_pf_telem *tdata =
+		(const struct type_pf_telem *) data;
+
+	return tdata->timeout;
+}
+
+static inline bool
+type_pf_data_expired(const struct type_pf_elem *data)
+{
+	const struct type_pf_telem *tdata =
+		(const struct type_pf_telem *) data;
+
+	return ip_set_timeout_expired(tdata->timeout);
+}
+
+static inline void
+type_pf_data_timeout_set(struct type_pf_elem *data, u32 timeout)
+{
+	struct type_pf_telem *tdata = (struct type_pf_telem *) data;
+
+	tdata->timeout = ip_set_timeout_set(timeout);
+}
+
+static int
+type_pf_elem_tadd(struct hbucket *n, const struct type_pf_elem *value,
+		  u32 timeout)
+{
+	struct type_pf_elem *data;
+
+	if (n->pos >= n->size) {
+		void *tmp;
+
+		if (n->size >= AHASH_MAX_SIZE)
+			/* Trigger rehashing */
+			return -EAGAIN;
+
+		tmp = kzalloc((n->size + AHASH_INIT_SIZE)
+			      * sizeof(struct type_pf_telem),
+			      GFP_ATOMIC);
+		if (!tmp)
+			return -ENOMEM;
+		if (n->size) {
+			memcpy(tmp, n->value,
+			       sizeof(struct type_pf_telem) * n->size);
+			kfree(n->value);
+		}
+		n->value = tmp;
+		n->size += AHASH_INIT_SIZE;
+	}
+	data = ahash_tdata(n, n->pos++);
+	type_pf_data_copy(data, value);
+	type_pf_data_timeout_set(data, timeout);
+	return 0;
+}
+
+/* Delete expired elements from the hashtable */
+static void
+type_pf_expire(struct ip_set_hash *h)
+{
+	struct htable *t = h->table;
+	struct hbucket *n;
+	struct type_pf_elem *data;
+	u32 i;
+	int j;
+
+	for (i = 0; i < jhash_size(t->htable_bits); i++) {
+		n = hbucket(t, i);
+		for (j = 0; j < n->pos; j++) {
+			data = ahash_tdata(n, j);
+			if (type_pf_data_expired(data)) {
+				pr_debug("expired %u/%u\n", i, j);
+#ifdef IP_SET_HASH_WITH_NETS
+				del_cidr(h, data->cidr, HOST_MASK);
+#endif
+				if (j != n->pos - 1)
+					/* Not last one */
+					type_pf_data_copy(data,
+						ahash_tdata(n, n->pos - 1));
+				n->pos--;
+				h->elements--;
+			}
+		}
+		if (n->pos + AHASH_INIT_SIZE < n->size) {
+			void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+					    * sizeof(struct type_pf_telem),
+					    GFP_ATOMIC);
+			if (!tmp)
+				/* Still try to delete expired elements */
+				continue;
+			n->size -= AHASH_INIT_SIZE;
+			memcpy(tmp, n->value,
+			       n->size * sizeof(struct type_pf_telem));
+			kfree(n->value);
+			n->value = tmp;
+		}
+	}
+}
+
+static int
+type_pf_tresize(struct ip_set *set, bool retried)
+{
+	struct ip_set_hash *h = set->data;
+	struct htable *t, *orig = h->table;
+	u8 htable_bits = orig->htable_bits;
+	const struct type_pf_elem *data;
+	struct hbucket *n, *m;
+	u32 i, j;
+	int ret;
+
+	/* Try to cleanup once */
+	if (!retried) {
+		i = h->elements;
+		write_lock_bh(&set->lock);
+		type_pf_expire(set->data);
+		write_unlock_bh(&set->lock);
+		if (h->elements < i)
+			return 0;
+	}
+
+retry:
+	ret = 0;
+	htable_bits++;
+	if (!htable_bits)
+		/* In case we have plenty of memory :-) */
+		return -IPSET_ERR_HASH_FULL;
+	t = ip_set_alloc(sizeof(*t)
+			 + jhash_size(htable_bits) * sizeof(struct hbucket));
+	if (!t)
+		return -ENOMEM;
+	t->htable_bits = htable_bits;
+
+	read_lock_bh(&set->lock);
+	for (i = 0; i < jhash_size(orig->htable_bits); i++) {
+		n = hbucket(orig, i);
+		for (j = 0; j < n->pos; j++) {
+			data = ahash_tdata(n, j);
+			m = hbucket(t, HKEY(data, h->initval, htable_bits));
+			ret = type_pf_elem_tadd(m, data,
+						type_pf_data_timeout(data));
+			if (ret < 0) {
+				read_unlock_bh(&set->lock);
+				ahash_destroy(t);
+				if (ret == -EAGAIN)
+					goto retry;
+				return ret;
+			}
+		}
+	}
+
+	rcu_assign_pointer(h->table, t);
+	read_unlock_bh(&set->lock);
+
+	/* Give time to other readers of the set */
+	synchronize_rcu_bh();
+
+	ahash_destroy(orig);
+
+	return 0;
+}
+
+static int
+type_pf_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+	struct ip_set_hash *h = set->data;
+	struct htable *t = h->table;
+	const struct type_pf_elem *d = value;
+	struct hbucket *n;
+	struct type_pf_elem *data;
+	int ret = 0, i, j = AHASH_MAX_SIZE + 1;
+	u32 key;
+
+	if (h->elements >= h->maxelem)
+		/* FIXME: when set is full, we slow down here */
+		type_pf_expire(h);
+	if (h->elements >= h->maxelem)
+		return -IPSET_ERR_HASH_FULL;
+
+	rcu_read_lock_bh();
+	t = rcu_dereference_bh(h->table);
+	key = HKEY(d, h->initval, t->htable_bits);
+	n = hbucket(t, key);
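+	/* Check for a duplicate; remember an expired slot in j (preferring
+	 * an expired duplicate) so that it can be reused for the new entry. */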
+	for (i = 0; i < n->pos; i++) {
+		data = ahash_tdata(n, i);
+		if (type_pf_data_equal(data, d)) {
+			if (type_pf_data_expired(data))
+				j = i;
+			else {
+				ret = -IPSET_ERR_EXIST;
+				goto out;
+			}
+		} else if (j == AHASH_MAX_SIZE + 1 &&
+			   type_pf_data_expired(data))
+			j = i;
+	}
+	if (j != AHASH_MAX_SIZE + 1) {
+		data = ahash_tdata(n, j);
+#ifdef IP_SET_HASH_WITH_NETS
+		del_cidr(h, data->cidr, HOST_MASK);
+		add_cidr(h, d->cidr, HOST_MASK);
+#endif
+		type_pf_data_copy(data, d);
+		type_pf_data_timeout_set(data, timeout);
+		goto out;
+	}
+	ret = type_pf_elem_tadd(n, d, timeout);
+	if (ret != 0)
+		goto out;
+
+#ifdef IP_SET_HASH_WITH_NETS
+	add_cidr(h, d->cidr, HOST_MASK);
+#endif
+	h->elements++;
+out:
+	rcu_read_unlock_bh();
+	return ret;
+}
+
+static int
+type_pf_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+	struct ip_set_hash *h = set->data;
+	struct htable *t = h->table;
+	const struct type_pf_elem *d = value;
+	struct hbucket *n;
+	int i, ret = 0;
+	struct type_pf_elem *data;
+	u32 key;
+
+	key = HKEY(value, h->initval, t->htable_bits);
+	n = hbucket(t, key);
+	for (i = 0; i < n->pos; i++) {
+		data = ahash_tdata(n, i);
+		if (!type_pf_data_equal(data, d))
+			continue;
+		if (type_pf_data_expired(data))
+			ret = -IPSET_ERR_EXIST;
+		if (i != n->pos - 1)
+			/* Not last one */
+			type_pf_data_copy(data, ahash_tdata(n, n->pos - 1));
+
+		n->pos--;
+		h->elements--;
+#ifdef IP_SET_HASH_WITH_NETS
+		del_cidr(h, d->cidr, HOST_MASK);
+#endif
+		if (n->pos + AHASH_INIT_SIZE < n->size) {
+			void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
+					    * sizeof(struct type_pf_telem),
+					    GFP_ATOMIC);
+			if (!tmp)
+				return 0;
+			n->size -= AHASH_INIT_SIZE;
+			memcpy(tmp, n->value,
+			       n->size * sizeof(struct type_pf_telem));
+			kfree(n->value);
+			n->value = tmp;
+		}
+		return 0;
+	}
+
+	return -IPSET_ERR_EXIST;
+}
+
+#ifdef IP_SET_HASH_WITH_NETS
+static int
+type_pf_ttest_cidrs(struct ip_set *set, struct type_pf_elem *d, u32 timeout)
+{
+	struct ip_set_hash *h = set->data;
+	struct htable *t = h->table;
+	struct type_pf_elem *data;
+	struct hbucket *n;
+	int i, j = 0;
+	u32 key;
+	u8 host_mask = SET_HOST_MASK(set->family);
+
+	for (; j < host_mask && h->nets[j].cidr; j++) {
+		type_pf_data_netmask(d, h->nets[j].cidr);
+		key = HKEY(d, h->initval, t->htable_bits);
+		n = hbucket(t, key);
+		for (i = 0; i < n->pos; i++) {
+			data = ahash_tdata(n, i);
+			if (type_pf_data_equal(data, d))
+				return !type_pf_data_expired(data);
+		}
+	}
+	return 0;
+}
+#endif
+
+static int
+type_pf_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+	struct ip_set_hash *h = set->data;
+	struct htable *t = h->table;
+	struct type_pf_elem *data, *d = value;
+	struct hbucket *n;
+	int i;
+	u32 key;
+
+#ifdef IP_SET_HASH_WITH_NETS
+	if (d->cidr == SET_HOST_MASK(set->family))
+		return type_pf_ttest_cidrs(set, d, timeout);
+#endif
+	key = HKEY(d, h->initval, t->htable_bits);
+	n = hbucket(t, key);
+	for (i = 0; i < n->pos; i++) {
+		data = ahash_tdata(n, i);
+		if (type_pf_data_equal(data, d))
+			return !type_pf_data_expired(data);
+	}
+	return 0;
+}
+
+static int
+type_pf_tlist(const struct ip_set *set,
+	      struct sk_buff *skb, struct netlink_callback *cb)
+{
+	const struct ip_set_hash *h = set->data;
+	const struct htable *t = h->table;
+	struct nlattr *atd, *nested;
+	const struct hbucket *n;
+	const struct type_pf_elem *data;
+	u32 first = cb->args[2];
+	/* We assume that one hash bucket fits into one page */
+	void *incomplete;
+	int i;
+
+	atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+	if (!atd)
+		return -EMSGSIZE;
+	for (; cb->args[2] < jhash_size(t->htable_bits); cb->args[2]++) {
+		incomplete = skb_tail_pointer(skb);
+		n = hbucket(t, cb->args[2]);
+		for (i = 0; i < n->pos; i++) {
+			data = ahash_tdata(n, i);
+			pr_debug("list %p %u\n", n, i);
+			if (type_pf_data_expired(data))
+				continue;
+			pr_debug("do list %p %u\n", n, i);
+			nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+			if (!nested) {
+				if (cb->args[2] == first) {
+					nla_nest_cancel(skb, atd);
+					return -EMSGSIZE;
+				} else
+					goto nla_put_failure;
+			}
+			if (type_pf_data_tlist(skb, data))
+				goto nla_put_failure;
+			ipset_nest_end(skb, nested);
+		}
+	}
+	ipset_nest_end(skb, atd);
+	/* Set listing finished */
+	cb->args[2] = 0;
+
+	return 0;
+
+nla_put_failure:
+	nlmsg_trim(skb, incomplete);
+	ipset_nest_end(skb, atd);
+	if (unlikely(first == cb->args[2])) {
+		pr_warning("Can't list set %s: one bucket does not fit into "
+			   "a message. Please report it!\n", set->name);
+		cb->args[2] = 0;
+		return -EMSGSIZE;
+	}
+	return 0;
+}
+
+static const struct ip_set_type_variant type_pf_tvariant = {
+	.kadt	= type_pf_kadt,
+	.uadt	= type_pf_uadt,
+	.adt	= {
+		[IPSET_ADD] = type_pf_tadd,
+		[IPSET_DEL] = type_pf_tdel,
+		[IPSET_TEST] = type_pf_ttest,
+	},
+	.destroy = type_pf_destroy,
+	.flush	= type_pf_flush,
+	.head	= type_pf_head,
+	.list	= type_pf_tlist,
+	.resize	= type_pf_tresize,
+	.same_set = type_pf_same_set,
+};
+
+static void
+type_pf_gc(unsigned long ul_set)
+{
+	struct ip_set *set = (struct ip_set *) ul_set;
+	struct ip_set_hash *h = set->data;
+
+	pr_debug("called\n");
+	write_lock_bh(&set->lock);
+	type_pf_expire(h);
+	write_unlock_bh(&set->lock);
+
+	h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+	add_timer(&h->gc);
+}
+
+static void
+type_pf_gc_init(struct ip_set *set)
+{
+	struct ip_set_hash *h = set->data;
+
+	init_timer(&h->gc);
+	h->gc.data = (unsigned long) set;
+	h->gc.function = type_pf_gc;
+	h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+	add_timer(&h->gc);
+	pr_debug("gc initialized, runs every %u seconds\n",
+		 IPSET_GC_PERIOD(h->timeout));
+}
+
+#undef type_pf_data_equal
+#undef type_pf_data_isnull
+#undef type_pf_data_copy
+#undef type_pf_data_zero_out
+#undef type_pf_data_list
+#undef type_pf_data_tlist
+
+#undef type_pf_elem
+#undef type_pf_telem
+#undef type_pf_data_timeout
+#undef type_pf_data_expired
+#undef type_pf_data_netmask
+#undef type_pf_data_timeout_set
+
+#undef type_pf_elem_add
+#undef type_pf_add
+#undef type_pf_del
+#undef type_pf_test_cidrs
+#undef type_pf_test
+
+#undef type_pf_elem_tadd
+#undef type_pf_expire
+#undef type_pf_tadd
+#undef type_pf_tdel
+#undef type_pf_ttest_cidrs
+#undef type_pf_ttest
+
+#undef type_pf_resize
+#undef type_pf_tresize
+#undef type_pf_flush
+#undef type_pf_destroy
+#undef type_pf_head
+#undef type_pf_list
+#undef type_pf_tlist
+#undef type_pf_same_set
+#undef type_pf_kadt
+#undef type_pf_uadt
+#undef type_pf_gc
+#undef type_pf_gc_init
+#undef type_pf_variant
+#undef type_pf_tvariant
diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h
new file mode 100644
index 0000000..61a9e87
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_bitmap.h
@@ -0,0 +1,31 @@
+#ifndef __IP_SET_BITMAP_H
+#define __IP_SET_BITMAP_H
+
+/* Bitmap type specific error codes */
+enum {
+	/* The element is out of the range of the set */
+	IPSET_ERR_BITMAP_RANGE = IPSET_ERR_TYPE_SPECIFIC,
+	/* The range exceeds the size limit of the set type */
+	IPSET_ERR_BITMAP_RANGE_SIZE,
+};
+
+#ifdef __KERNEL__
+#define IPSET_BITMAP_MAX_RANGE	0x0000FFFF
+
+/* Common functions */
+
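+/* Map an address range onto the covering CIDR block: shorten the mask
+ * (shift it left) until 'to' masked with it equals 'from'; the prefix
+ * length is returned via *bits. */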
+static inline u32
+range_to_mask(u32 from, u32 to, u8 *bits)
+{
+	u32 mask = 0xFFFFFFFE;
+
+	*bits = 32;
+	while (--(*bits) > 0 && mask && (to & mask) != from)
+		mask <<= 1;
+
+	return mask;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_BITMAP_H */
diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h
new file mode 100644
index 0000000..3882a81
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_getport.h
@@ -0,0 +1,21 @@
+#ifndef _IP_SET_GETPORT_H
+#define _IP_SET_GETPORT_H
+
+extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
+				__be16 *port, u8 *proto);
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+				__be16 *port, u8 *proto);
+#else
+static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+				       __be16 *port, u8 *proto)
+{
+	return false;
+}
+#endif
+
+extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src,
+				__be16 *port);
+
+#endif /*_IP_SET_GETPORT_H*/
diff --git a/include/linux/netfilter/ipset/ip_set_hash.h b/include/linux/netfilter/ipset/ip_set_hash.h
new file mode 100644
index 0000000..b86f15c
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_hash.h
@@ -0,0 +1,26 @@
+#ifndef __IP_SET_HASH_H
+#define __IP_SET_HASH_H
+
+/* Hash type specific error codes */
+enum {
+	/* Hash is full */
+	IPSET_ERR_HASH_FULL = IPSET_ERR_TYPE_SPECIFIC,
+	/* Null-valued element */
+	IPSET_ERR_HASH_ELEM,
+	/* Invalid protocol */
+	IPSET_ERR_INVALID_PROTO,
+	/* Protocol missing but must be specified */
+	IPSET_ERR_MISSING_PROTO,
+};
+
+#ifdef __KERNEL__
+
+#define IPSET_DEFAULT_HASHSIZE		1024
+#define IPSET_MIMINAL_HASHSIZE		64
+#define IPSET_DEFAULT_MAXELEM		65536
+#define IPSET_DEFAULT_PROBES		4
+#define IPSET_DEFAULT_RESIZE		100
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_HASH_H */
diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h
new file mode 100644
index 0000000..40a63f3
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_list.h
@@ -0,0 +1,27 @@
+#ifndef __IP_SET_LIST_H
+#define __IP_SET_LIST_H
+
+/* List type specific error codes */
+enum {
+	/* Set name to be added/deleted/tested does not exist. */
+	IPSET_ERR_NAME = IPSET_ERR_TYPE_SPECIFIC,
+	/* list:set type is not permitted to add */
+	IPSET_ERR_LOOP,
+	/* Missing reference set */
+	IPSET_ERR_BEFORE,
+	/* Reference set does not exist */
+	IPSET_ERR_NAMEREF,
+	/* Set is full */
+	IPSET_ERR_LIST_FULL,
+	/* Reference set is not added to the set */
+	IPSET_ERR_REF_EXIST,
+};
+
+#ifdef __KERNEL__
+
+#define IP_SET_LIST_DEFAULT_SIZE	8
+#define IP_SET_LIST_MIN_SIZE		4
+
+#endif /* __KERNEL__ */
+
+#endif /* __IP_SET_LIST_H */
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
new file mode 100644
index 0000000..9f30c5f
--- /dev/null
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -0,0 +1,127 @@
+#ifndef _IP_SET_TIMEOUT_H
+#define _IP_SET_TIMEOUT_H
+
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+/* How often should the gc be run by default */
+#define IPSET_GC_TIME			(3 * 60)
+
+/* Timeout period depending on the timeout value of the given set */
+#define IPSET_GC_PERIOD(timeout) \
+	((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1)
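+/* Example: with a 600 second timeout the gc runs every
+ * min(600/3, IPSET_GC_TIME) = 180 seconds. */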
+
+/* Set is defined without timeout support: timeout value may be 0 */
+#define IPSET_NO_TIMEOUT	UINT_MAX
+
+#define with_timeout(timeout)	((timeout) != IPSET_NO_TIMEOUT)
+
+static inline unsigned int
+ip_set_timeout_uget(struct nlattr *tb)
+{
+	unsigned int timeout = ip_set_get_h32(tb);
+
+	/* Userspace supplied TIMEOUT parameter: avoid clashing with
+	 * the reserved IPSET_NO_TIMEOUT value */
+	return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout;
+}
+
+#ifdef IP_SET_BITMAP_TIMEOUT
+
+/* Bitmap specific timeout constants and macros for the entries */
+
+/* Bitmap entry is unset */
+#define IPSET_ELEM_UNSET	0
+/* Bitmap entry is set with no timeout value */
+#define IPSET_ELEM_PERMANENT	(UINT_MAX/2)
+
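+/* Timeouts are stored as absolute expiry times in jiffies; the two
+ * values above are reserved to flag unset and permanent entries. */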
+static inline bool
+ip_set_timeout_test(unsigned long timeout)
+{
+	return timeout != IPSET_ELEM_UNSET &&
+	       (timeout == IPSET_ELEM_PERMANENT ||
+		time_after(timeout, jiffies));
+}
+
+static inline bool
+ip_set_timeout_expired(unsigned long timeout)
+{
+	return timeout != IPSET_ELEM_UNSET &&
+	       timeout != IPSET_ELEM_PERMANENT &&
+	       time_before(timeout, jiffies);
+}
+
+static inline unsigned long
+ip_set_timeout_set(u32 timeout)
+{
+	unsigned long t;
+
+	if (!timeout)
+		return IPSET_ELEM_PERMANENT;
+
+	t = timeout * HZ + jiffies;
+	if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT)
+		/* Bingo! */
+		t++;
+
+	return t;
+}
+
+static inline u32
+ip_set_timeout_get(unsigned long timeout)
+{
+	return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ;
+}
+
+#else
+
+/* Hash specific timeout constants and macros for the entries */
+
+/* Hash entry is set with no timeout value */
+#define IPSET_ELEM_PERMANENT	0
+
+static inline bool
+ip_set_timeout_test(unsigned long timeout)
+{
+	return timeout == IPSET_ELEM_PERMANENT ||
+	       time_after(timeout, jiffies);
+}
+
+static inline bool
+ip_set_timeout_expired(unsigned long timeout)
+{
+	return timeout != IPSET_ELEM_PERMANENT &&
+	       time_before(timeout, jiffies);
+}
+
+static inline unsigned long
+ip_set_timeout_set(u32 timeout)
+{
+	unsigned long t;
+
+	if (!timeout)
+		return IPSET_ELEM_PERMANENT;
+
+	t = timeout * HZ + jiffies;
+	if (t == IPSET_ELEM_PERMANENT)
+		/* Bingo! :-) */
+		t++;
+
+	return t;
+}
+
+static inline u32
+ip_set_timeout_get(unsigned long timeout)
+{
+	return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ;
+}
+#endif /* ! IP_SET_BITMAP_TIMEOUT */
+
+#endif	/* __KERNEL__ */
+
+#endif /* _IP_SET_TIMEOUT_H */
diff --git a/include/linux/netfilter/ipset/pfxlen.h b/include/linux/netfilter/ipset/pfxlen.h
new file mode 100644
index 0000000..0e1fb50
--- /dev/null
+++ b/include/linux/netfilter/ipset/pfxlen.h
@@ -0,0 +1,35 @@
+#ifndef _PFXLEN_H
+#define _PFXLEN_H
+
+#include <asm/byteorder.h>
+#include <linux/netfilter.h>
+
+/* Prefixlen maps, by Jan Engelhardt */
+extern const union nf_inet_addr ip_set_netmask_map[];
+extern const union nf_inet_addr ip_set_hostmask_map[];
+
+static inline __be32
+ip_set_netmask(u8 pfxlen)
+{
+	return ip_set_netmask_map[pfxlen].ip;
+}
+
+static inline const __be32 *
+ip_set_netmask6(u8 pfxlen)
+{
+	return &ip_set_netmask_map[pfxlen].ip6[0];
+}
+
+static inline u32
+ip_set_hostmask(u8 pfxlen)
+{
+	return (__force u32) ip_set_hostmask_map[pfxlen].ip;
+}
+
+static inline const __be32 *
+ip_set_hostmask6(u8 pfxlen)
+{
+	return &ip_set_hostmask_map[pfxlen].ip6[0];
+}
+
+#endif /*_PFXLEN_H */
diff --git a/include/linux/netfilter/nf_conntrack_snmp.h b/include/linux/netfilter/nf_conntrack_snmp.h
new file mode 100644
index 0000000..064bc63
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_snmp.h
@@ -0,0 +1,9 @@
+#ifndef _NF_CONNTRACK_SNMP_H
+#define _NF_CONNTRACK_SNMP_H
+
+extern int (*nf_nat_snmp_hook)(struct sk_buff *skb,
+				unsigned int protoff,
+				struct nf_conn *ct,
+				enum ip_conntrack_info ctinfo);
+
+#endif /* _NF_CONNTRACK_SNMP_H */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 361d6b5..2b11fc1 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -47,7 +47,8 @@
 #define NFNL_SUBSYS_QUEUE		3
 #define NFNL_SUBSYS_ULOG		4
 #define NFNL_SUBSYS_OSF			5
-#define NFNL_SUBSYS_COUNT		6
+#define NFNL_SUBSYS_IPSET		6
+#define NFNL_SUBSYS_COUNT		7
 
 #ifdef __KERNEL__
 
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index 19711e3..debf1ae 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -42,6 +42,7 @@
 	CTA_SECMARK,		/* obsolete */
 	CTA_ZONE,
 	CTA_SECCTX,
+	CTA_TIMESTAMP,
 	__CTA_MAX
 };
 #define CTA_MAX (__CTA_MAX - 1)
@@ -127,6 +128,14 @@
 };
 #define CTA_COUNTERS_MAX (__CTA_COUNTERS_MAX - 1)
 
+enum ctattr_tstamp {
+	CTA_TIMESTAMP_UNSPEC,
+	CTA_TIMESTAMP_START,
+	CTA_TIMESTAMP_STOP,
+	__CTA_TIMESTAMP_MAX
+};
+#define CTA_TIMESTAMP_MAX (__CTA_TIMESTAMP_MAX - 1)
+
 enum ctattr_nat {
 	CTA_NAT_UNSPEC,
 	CTA_NAT_MINIP,
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 6712e71..3721952 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -611,8 +611,9 @@
 extern void xt_compat_lock(u_int8_t af);
 extern void xt_compat_unlock(u_int8_t af);
 
-extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta);
+extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
 extern void xt_compat_flush_offsets(u_int8_t af);
+extern void xt_compat_init_offsets(u_int8_t af, unsigned int number);
 extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
 
 extern int xt_compat_match_offset(const struct xt_match *match);
diff --git a/include/linux/netfilter/xt_AUDIT.h b/include/linux/netfilter/xt_AUDIT.h
new file mode 100644
index 0000000..38751d2
--- /dev/null
+++ b/include/linux/netfilter/xt_AUDIT.h
@@ -0,0 +1,30 @@
+/*
+ * Header file for iptables xt_AUDIT target
+ *
+ * (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
+ * (C) 2010-2011 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _XT_AUDIT_TARGET_H
+#define _XT_AUDIT_TARGET_H
+
+#include <linux/types.h>
+
+enum {
+	XT_AUDIT_TYPE_ACCEPT = 0,
+	XT_AUDIT_TYPE_DROP,
+	XT_AUDIT_TYPE_REJECT,
+	__XT_AUDIT_TYPE_MAX,
+};
+
+#define XT_AUDIT_TYPE_MAX (__XT_AUDIT_TYPE_MAX - 1)
+
+struct xt_audit_info {
+	__u8 type; /* XT_AUDIT_TYPE_* */
+};
+
+#endif /* _XT_AUDIT_TARGET_H */
diff --git a/include/linux/netfilter/xt_CT.h b/include/linux/netfilter/xt_CT.h
index 1b56410..b56e768 100644
--- a/include/linux/netfilter/xt_CT.h
+++ b/include/linux/netfilter/xt_CT.h
@@ -1,14 +1,16 @@
 #ifndef _XT_CT_H
 #define _XT_CT_H
 
+#include <linux/types.h>
+
 #define XT_CT_NOTRACK	0x1
 
 struct xt_ct_target_info {
-	u_int16_t	flags;
-	u_int16_t	zone;
-	u_int32_t	ct_events;
-	u_int32_t	exp_events;
-	char		helper[16];
+	__u16 flags;
+	__u16 zone;
+	__u32 ct_events;
+	__u32 exp_events;
+	char helper[16];
 
 	/* Used internally by the kernel */
 	struct nf_conn	*ct __attribute__((aligned(8)));
diff --git a/include/linux/netfilter/xt_NFQUEUE.h b/include/linux/netfilter/xt_NFQUEUE.h
index 2584f4a..9eafdbb 100644
--- a/include/linux/netfilter/xt_NFQUEUE.h
+++ b/include/linux/netfilter/xt_NFQUEUE.h
@@ -20,4 +20,10 @@
 	__u16 queues_total;
 };
 
+struct xt_NFQ_info_v2 {
+	__u16 queuenum;
+	__u16 queues_total;
+	__u16 bypass;
+};
+
 #endif /* _XT_NFQ_TARGET_H */
diff --git a/include/linux/netfilter/xt_TCPOPTSTRIP.h b/include/linux/netfilter/xt_TCPOPTSTRIP.h
index 2db5432..7157318 100644
--- a/include/linux/netfilter/xt_TCPOPTSTRIP.h
+++ b/include/linux/netfilter/xt_TCPOPTSTRIP.h
@@ -1,13 +1,15 @@
 #ifndef _XT_TCPOPTSTRIP_H
 #define _XT_TCPOPTSTRIP_H
 
+#include <linux/types.h>
+
 #define tcpoptstrip_set_bit(bmap, idx) \
 	(bmap[(idx) >> 5] |= 1U << (idx & 31))
 #define tcpoptstrip_test_bit(bmap, idx) \
 	(((1U << (idx & 31)) & bmap[(idx) >> 5]) != 0)
 
 struct xt_tcpoptstrip_target_info {
-	u_int32_t strip_bmap[8];
+	__u32 strip_bmap[8];
 };
 
 #endif /* _XT_TCPOPTSTRIP_H */
diff --git a/include/linux/netfilter/xt_TPROXY.h b/include/linux/netfilter/xt_TPROXY.h
index 3f3d693..902043c 100644
--- a/include/linux/netfilter/xt_TPROXY.h
+++ b/include/linux/netfilter/xt_TPROXY.h
@@ -1,19 +1,21 @@
 #ifndef _XT_TPROXY_H
 #define _XT_TPROXY_H
 
+#include <linux/types.h>
+
 /* TPROXY target is capable of marking the packet to perform
  * redirection. We can get rid of that whenever we get support for
  * multiple targets in the same rule. */
 struct xt_tproxy_target_info {
-	u_int32_t mark_mask;
-	u_int32_t mark_value;
+	__u32 mark_mask;
+	__u32 mark_value;
 	__be32 laddr;
 	__be16 lport;
 };
 
 struct xt_tproxy_target_info_v1 {
-	u_int32_t mark_mask;
-	u_int32_t mark_value;
+	__u32 mark_mask;
+	__u32 mark_value;
 	union nf_inet_addr laddr;
 	__be16 lport;
 };
diff --git a/include/linux/netfilter/xt_cluster.h b/include/linux/netfilter/xt_cluster.h
index 8866826..9b883c8 100644
--- a/include/linux/netfilter/xt_cluster.h
+++ b/include/linux/netfilter/xt_cluster.h
@@ -1,15 +1,17 @@
 #ifndef _XT_CLUSTER_MATCH_H
 #define _XT_CLUSTER_MATCH_H
 
+#include <linux/types.h>
+
 enum xt_cluster_flags {
 	XT_CLUSTER_F_INV	= (1 << 0)
 };
 
 struct xt_cluster_match_info {
-	u_int32_t		total_nodes;
-	u_int32_t		node_mask;
-	u_int32_t		hash_seed;
-	u_int32_t		flags;
+	__u32 total_nodes;
+	__u32 node_mask;
+	__u32 hash_seed;
+	__u32 flags;
 };
 
 #define XT_CLUSTER_NODES_MAX	32
diff --git a/include/linux/netfilter/xt_comment.h b/include/linux/netfilter/xt_comment.h
index eacfedc..0ea5e79 100644
--- a/include/linux/netfilter/xt_comment.h
+++ b/include/linux/netfilter/xt_comment.h
@@ -4,7 +4,7 @@
 #define XT_MAX_COMMENT_LEN 256
 
 struct xt_comment_info {
-	unsigned char comment[XT_MAX_COMMENT_LEN];
+	char comment[XT_MAX_COMMENT_LEN];
 };
 
 #endif /* XT_COMMENT_H */
diff --git a/include/linux/netfilter/xt_connlimit.h b/include/linux/netfilter/xt_connlimit.h
index 7e3284b..0ca66e9 100644
--- a/include/linux/netfilter/xt_connlimit.h
+++ b/include/linux/netfilter/xt_connlimit.h
@@ -1,8 +1,15 @@
 #ifndef _XT_CONNLIMIT_H
 #define _XT_CONNLIMIT_H
 
+#include <linux/types.h>
+
 struct xt_connlimit_data;
 
+enum {
+	XT_CONNLIMIT_INVERT = 1 << 0,
+	XT_CONNLIMIT_DADDR  = 1 << 1,
+};
+
 struct xt_connlimit_info {
 	union {
 		union nf_inet_addr mask;
@@ -13,7 +20,14 @@
 		};
 #endif
 	};
-	unsigned int limit, inverse;
+	unsigned int limit;
+	union {
+		/* revision 0 */
+		unsigned int inverse;
+
+		/* revision 1 */
+		__u32 flags;
+	};
 
 	/* Used internally by the kernel */
 	struct xt_connlimit_data *data __attribute__((aligned(8)));
diff --git a/include/linux/netfilter/xt_conntrack.h b/include/linux/netfilter/xt_conntrack.h
index 54f47a2..74b904d 100644
--- a/include/linux/netfilter/xt_conntrack.h
+++ b/include/linux/netfilter/xt_conntrack.h
@@ -58,4 +58,19 @@
 	__u16 state_mask, status_mask;
 };
 
+struct xt_conntrack_mtinfo3 {
+	union nf_inet_addr origsrc_addr, origsrc_mask;
+	union nf_inet_addr origdst_addr, origdst_mask;
+	union nf_inet_addr replsrc_addr, replsrc_mask;
+	union nf_inet_addr repldst_addr, repldst_mask;
+	__u32 expires_min, expires_max;
+	__u16 l4proto;
+	__u16 origsrc_port, origdst_port;
+	__u16 replsrc_port, repldst_port;
+	__u16 match_flags, invert_flags;
+	__u16 state_mask, status_mask;
+	__u16 origsrc_port_high, origdst_port_high;
+	__u16 replsrc_port_high, repldst_port_high;
+};
+
 #endif /*_XT_CONNTRACK_H*/
diff --git a/include/linux/netfilter/xt_devgroup.h b/include/linux/netfilter/xt_devgroup.h
new file mode 100644
index 0000000..1babde0
--- /dev/null
+++ b/include/linux/netfilter/xt_devgroup.h
@@ -0,0 +1,21 @@
+#ifndef _XT_DEVGROUP_H
+#define _XT_DEVGROUP_H
+
+#include <linux/types.h>
+
+enum xt_devgroup_flags {
+	XT_DEVGROUP_MATCH_SRC	= 0x1,
+	XT_DEVGROUP_INVERT_SRC	= 0x2,
+	XT_DEVGROUP_MATCH_DST	= 0x4,
+	XT_DEVGROUP_INVERT_DST	= 0x8,
+};
+
+struct xt_devgroup_info {
+	__u32	flags;
+	__u32	src_group;
+	__u32	src_mask;
+	__u32	dst_group;
+	__u32	dst_mask;
+};
+
+#endif /* _XT_DEVGROUP_H */
diff --git a/include/linux/netfilter/xt_quota.h b/include/linux/netfilter/xt_quota.h
index b0d28c6..ca6e03e 100644
--- a/include/linux/netfilter/xt_quota.h
+++ b/include/linux/netfilter/xt_quota.h
@@ -1,6 +1,8 @@
 #ifndef _XT_QUOTA_H
 #define _XT_QUOTA_H
 
+#include <linux/types.h>
+
 enum xt_quota_flags {
 	XT_QUOTA_INVERT		= 0x1,
 };
@@ -9,9 +11,9 @@
 struct xt_quota_priv;
 
 struct xt_quota_info {
-	u_int32_t		flags;
-	u_int32_t		pad;
-	aligned_u64		quota;
+	__u32 flags;
+	__u32 pad;
+	aligned_u64 quota;
 
 	/* Used internally by the kernel */
 	struct xt_quota_priv	*master;
diff --git a/include/linux/netfilter/xt_set.h b/include/linux/netfilter/xt_set.h
new file mode 100644
index 0000000..081f1de
--- /dev/null
+++ b/include/linux/netfilter/xt_set.h
@@ -0,0 +1,56 @@
+#ifndef _XT_SET_H
+#define _XT_SET_H
+
+#include <linux/types.h>
+#include <linux/netfilter/ipset/ip_set.h>
+
+/* Revision 0 interface: backward compatible with netfilter/iptables */
+
+/*
+ * Option flags for kernel operations (xt_set_info_v0)
+ */
+#define IPSET_SRC		0x01	/* Source match/add */
+#define IPSET_DST		0x02	/* Destination match/add */
+#define IPSET_MATCH_INV		0x04	/* Inverse matching */
+
+struct xt_set_info_v0 {
+	ip_set_id_t index;
+	union {
+		__u32 flags[IPSET_DIM_MAX + 1];
+		struct {
+			__u32 __flags[IPSET_DIM_MAX];
+			__u8 dim;
+			__u8 flags;
+		} compat;
+	} u;
+};
+
+/* match and target infos */
+struct xt_set_info_match_v0 {
+	struct xt_set_info_v0 match_set;
+};
+
+struct xt_set_info_target_v0 {
+	struct xt_set_info_v0 add_set;
+	struct xt_set_info_v0 del_set;
+};
+
+/* Revision 1: current interface to netfilter/iptables */
+
+struct xt_set_info {
+	ip_set_id_t index;
+	__u8 dim;
+	__u8 flags;
+};
+
+/* match and target infos */
+struct xt_set_info_match {
+	struct xt_set_info match_set;
+};
+
+struct xt_set_info_target {
+	struct xt_set_info add_set;
+	struct xt_set_info del_set;
+};
+
+#endif /*_XT_SET_H*/
diff --git a/include/linux/netfilter/xt_socket.h b/include/linux/netfilter/xt_socket.h
index 6f475b8..26d7217 100644
--- a/include/linux/netfilter/xt_socket.h
+++ b/include/linux/netfilter/xt_socket.h
@@ -1,6 +1,8 @@
 #ifndef _XT_SOCKET_H
 #define _XT_SOCKET_H
 
+#include <linux/types.h>
+
 enum {
 	XT_SOCKET_TRANSPARENT = 1 << 0,
 };
diff --git a/include/linux/netfilter/xt_time.h b/include/linux/netfilter/xt_time.h
index 14b6df4..7c37fac 100644
--- a/include/linux/netfilter/xt_time.h
+++ b/include/linux/netfilter/xt_time.h
@@ -1,14 +1,16 @@
 #ifndef _XT_TIME_H
 #define _XT_TIME_H 1
 
+#include <linux/types.h>
+
 struct xt_time_info {
-	u_int32_t date_start;
-	u_int32_t date_stop;
-	u_int32_t daytime_start;
-	u_int32_t daytime_stop;
-	u_int32_t monthdays_match;
-	u_int8_t weekdays_match;
-	u_int8_t flags;
+	__u32 date_start;
+	__u32 date_stop;
+	__u32 daytime_start;
+	__u32 daytime_stop;
+	__u32 monthdays_match;
+	__u8 weekdays_match;
+	__u8 flags;
 };
 
 enum {
diff --git a/include/linux/netfilter/xt_u32.h b/include/linux/netfilter/xt_u32.h
index 9947f56..04d1bfe 100644
--- a/include/linux/netfilter/xt_u32.h
+++ b/include/linux/netfilter/xt_u32.h
@@ -1,6 +1,8 @@
 #ifndef _XT_U32_H
 #define _XT_U32_H 1
 
+#include <linux/types.h>
+
 enum xt_u32_ops {
 	XT_U32_AND,
 	XT_U32_LEFTSH,
@@ -9,13 +11,13 @@
 };
 
 struct xt_u32_location_element {
-	u_int32_t number;
-	u_int8_t nextop;
+	__u32 number;
+	__u8 nextop;
 };
 
 struct xt_u32_value_element {
-	u_int32_t min;
-	u_int32_t max;
+	__u32 min;
+	__u32 max;
 };
 
 /*
@@ -27,14 +29,14 @@
 struct xt_u32_test {
 	struct xt_u32_location_element location[XT_U32_MAXSIZE+1];
 	struct xt_u32_value_element value[XT_U32_MAXSIZE+1];
-	u_int8_t nnums;
-	u_int8_t nvalues;
+	__u8 nnums;
+	__u8 nvalues;
 };
 
 struct xt_u32 {
 	struct xt_u32_test tests[XT_U32_MAXSIZE+1];
-	u_int8_t ntests;
-	u_int8_t invert;
+	__u8 ntests;
+	__u8 invert;
 };
 
 #endif /* _XT_U32_H */
diff --git a/include/linux/netfilter_bridge/ebt_802_3.h b/include/linux/netfilter_bridge/ebt_802_3.h
index c73ef0b..be5be15 100644
--- a/include/linux/netfilter_bridge/ebt_802_3.h
+++ b/include/linux/netfilter_bridge/ebt_802_3.h
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_802_3_H
 #define __LINUX_BRIDGE_EBT_802_3_H
 
+#include <linux/types.h>
+
 #define EBT_802_3_SAP 0x01
 #define EBT_802_3_TYPE 0x02
 
@@ -24,24 +26,24 @@
 
 /* ui has one byte ctrl, ni has two */
 struct hdr_ui {
-	uint8_t dsap;
-	uint8_t ssap;
-	uint8_t ctrl;
-	uint8_t orig[3];
+	__u8 dsap;
+	__u8 ssap;
+	__u8 ctrl;
+	__u8 orig[3];
 	__be16 type;
 };
 
 struct hdr_ni {
-	uint8_t dsap;
-	uint8_t ssap;
+	__u8 dsap;
+	__u8 ssap;
 	__be16 ctrl;
-	uint8_t  orig[3];
+	__u8  orig[3];
 	__be16 type;
 };
 
 struct ebt_802_3_hdr {
-	uint8_t  daddr[6];
-	uint8_t  saddr[6];
+	__u8  daddr[6];
+	__u8  saddr[6];
 	__be16 len;
 	union {
 		struct hdr_ui ui;
@@ -59,10 +61,10 @@
 #endif
 
 struct ebt_802_3_info {
-	uint8_t  sap;
+	__u8  sap;
 	__be16 type;
-	uint8_t  bitmask;
-	uint8_t  invflags;
+	__u8  bitmask;
+	__u8  invflags;
 };
 
 #endif
diff --git a/include/linux/netfilter_bridge/ebt_among.h b/include/linux/netfilter_bridge/ebt_among.h
index 0009558..bd4e3ad 100644
--- a/include/linux/netfilter_bridge/ebt_among.h
+++ b/include/linux/netfilter_bridge/ebt_among.h
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_AMONG_H
 #define __LINUX_BRIDGE_EBT_AMONG_H
 
+#include <linux/types.h>
+
 #define EBT_AMONG_DST 0x01
 #define EBT_AMONG_SRC 0x02
 
@@ -30,7 +32,7 @@
  */
 
 struct ebt_mac_wormhash_tuple {
-	uint32_t cmp[2];
+	__u32 cmp[2];
 	__be32 ip;
 };
 
diff --git a/include/linux/netfilter_bridge/ebt_arp.h b/include/linux/netfilter_bridge/ebt_arp.h
index cbf4843..522f3e4 100644
--- a/include/linux/netfilter_bridge/ebt_arp.h
+++ b/include/linux/netfilter_bridge/ebt_arp.h
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_ARP_H
 #define __LINUX_BRIDGE_EBT_ARP_H
 
+#include <linux/types.h>
+
 #define EBT_ARP_OPCODE 0x01
 #define EBT_ARP_HTYPE 0x02
 #define EBT_ARP_PTYPE 0x04
@@ -27,8 +29,8 @@
 	unsigned char smmsk[ETH_ALEN];
 	unsigned char dmaddr[ETH_ALEN];
 	unsigned char dmmsk[ETH_ALEN];
-	uint8_t  bitmask;
-	uint8_t  invflags;
+	__u8  bitmask;
+	__u8  invflags;
 };
 
 #endif
diff --git a/include/linux/netfilter_bridge/ebt_ip.h b/include/linux/netfilter_bridge/ebt_ip.h
index 6a708fb..c4bbc41 100644
--- a/include/linux/netfilter_bridge/ebt_ip.h
+++ b/include/linux/netfilter_bridge/ebt_ip.h
@@ -15,6 +15,8 @@
 #ifndef __LINUX_BRIDGE_EBT_IP_H
 #define __LINUX_BRIDGE_EBT_IP_H
 
+#include <linux/types.h>
+
 #define EBT_IP_SOURCE 0x01
 #define EBT_IP_DEST 0x02
 #define EBT_IP_TOS 0x04
@@ -31,12 +33,12 @@
 	__be32 daddr;
 	__be32 smsk;
 	__be32 dmsk;
-	uint8_t  tos;
-	uint8_t  protocol;
-	uint8_t  bitmask;
-	uint8_t  invflags;
-	uint16_t sport[2];
-	uint16_t dport[2];
+	__u8  tos;
+	__u8  protocol;
+	__u8  bitmask;
+	__u8  invflags;
+	__u16 sport[2];
+	__u16 dport[2];
 };
 
 #endif
diff --git a/include/linux/netfilter_bridge/ebt_ip6.h b/include/linux/netfilter_bridge/ebt_ip6.h
index e5de987..42b8896 100644
--- a/include/linux/netfilter_bridge/ebt_ip6.h
+++ b/include/linux/netfilter_bridge/ebt_ip6.h
@@ -12,14 +12,19 @@
 #ifndef __LINUX_BRIDGE_EBT_IP6_H
 #define __LINUX_BRIDGE_EBT_IP6_H
 
+#include <linux/types.h>
+
 #define EBT_IP6_SOURCE 0x01
 #define EBT_IP6_DEST 0x02
 #define EBT_IP6_TCLASS 0x04
 #define EBT_IP6_PROTO 0x08
 #define EBT_IP6_SPORT 0x10
 #define EBT_IP6_DPORT 0x20
+#define EBT_IP6_ICMP6 0x40
+
 #define EBT_IP6_MASK (EBT_IP6_SOURCE | EBT_IP6_DEST | EBT_IP6_TCLASS |\
-		      EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT)
+		      EBT_IP6_PROTO | EBT_IP6_SPORT | EBT_IP6_DPORT | \
+		      EBT_IP6_ICMP6)
 #define EBT_IP6_MATCH "ip6"
 
 /* the same values are used for the invflags */
@@ -28,12 +33,18 @@
 	struct in6_addr daddr;
 	struct in6_addr smsk;
 	struct in6_addr dmsk;
-	uint8_t  tclass;
-	uint8_t  protocol;
-	uint8_t  bitmask;
-	uint8_t  invflags;
-	uint16_t sport[2];
-	uint16_t dport[2];
+	__u8  tclass;
+	__u8  protocol;
+	__u8  bitmask;
+	__u8  invflags;
+	union {
+		__u16 sport[2];
+		__u8 icmpv6_type[2];
+	};
+	union {
+		__u16 dport[2];
+		__u8 icmpv6_code[2];
+	};
 };
 
 #endif
diff --git a/include/linux/netfilter_bridge/ebt_limit.h b/include/linux/netfilter_bridge/ebt_limit.h
index 4bf76b7..66d80b3 100644
--- a/include/linux/netfilter_bridge/ebt_limit.h
+++ b/include/linux/netfilter_bridge/ebt_limit.h
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_LIMIT_H
 #define __LINUX_BRIDGE_EBT_LIMIT_H
 
+#include <linux/types.h>
+
 #define EBT_LIMIT_MATCH "limit"
 
 /* timings are in milliseconds. */
@@ -10,13 +12,13 @@
    seconds, or one every 59 hours. */
 
 struct ebt_limit_info {
-	u_int32_t avg;    /* Average secs between packets * scale */
-	u_int32_t burst;  /* Period multiplier for upper limit. */
+	__u32 avg;    /* Average secs between packets * scale */
+	__u32 burst;  /* Period multiplier for upper limit. */
 
 	/* Used internally by the kernel */
 	unsigned long prev;
-	u_int32_t credit;
-	u_int32_t credit_cap, cost;
+	__u32 credit;
+	__u32 credit_cap, cost;
 };
 
 #endif
diff --git a/include/linux/netfilter_bridge/ebt_log.h b/include/linux/netfilter_bridge/ebt_log.h
index cc2cdfb..7e7f1d1 100644
--- a/include/linux/netfilter_bridge/ebt_log.h
+++ b/include/linux/netfilter_bridge/ebt_log.h
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_LOG_H
 #define __LINUX_BRIDGE_EBT_LOG_H
 
+#include <linux/types.h>
+
 #define EBT_LOG_IP 0x01 /* if the frame is made by ip, log the ip information */
 #define EBT_LOG_ARP 0x02
 #define EBT_LOG_NFLOG 0x04
@@ -10,9 +12,9 @@
 #define EBT_LOG_WATCHER "log"
 
 struct ebt_log_info {
-	uint8_t loglevel;
-	uint8_t prefix[EBT_LOG_PREFIX_SIZE];
-	uint32_t bitmask;
+	__u8 loglevel;
+	__u8 prefix[EBT_LOG_PREFIX_SIZE];
+	__u32 bitmask;
 };
 
 #endif
diff --git a/include/linux/netfilter_bridge/ebt_mark_m.h b/include/linux/netfilter_bridge/ebt_mark_m.h
index 9ceb10e..410f9e5 100644
--- a/include/linux/netfilter_bridge/ebt_mark_m.h
+++ b/include/linux/netfilter_bridge/ebt_mark_m.h
@@ -1,13 +1,15 @@
 #ifndef __LINUX_BRIDGE_EBT_MARK_M_H
 #define __LINUX_BRIDGE_EBT_MARK_M_H
 
+#include <linux/types.h>
+
 #define EBT_MARK_AND 0x01
 #define EBT_MARK_OR 0x02
 #define EBT_MARK_MASK (EBT_MARK_AND | EBT_MARK_OR)
 struct ebt_mark_m_info {
 	unsigned long mark, mask;
-	uint8_t invert;
-	uint8_t bitmask;
+	__u8 invert;
+	__u8 bitmask;
 };
 #define EBT_MARK_MATCH "mark_m"
 
diff --git a/include/linux/netfilter_bridge/ebt_nflog.h b/include/linux/netfilter_bridge/ebt_nflog.h
index 0528178..df829fc 100644
--- a/include/linux/netfilter_bridge/ebt_nflog.h
+++ b/include/linux/netfilter_bridge/ebt_nflog.h
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_NFLOG_H
 #define __LINUX_BRIDGE_EBT_NFLOG_H
 
+#include <linux/types.h>
+
 #define EBT_NFLOG_MASK 0x0
 
 #define EBT_NFLOG_PREFIX_SIZE 64
@@ -10,11 +12,11 @@
 #define EBT_NFLOG_DEFAULT_THRESHOLD	1
 
 struct ebt_nflog_info {
-	u_int32_t len;
-	u_int16_t group;
-	u_int16_t threshold;
-	u_int16_t flags;
-	u_int16_t pad;
+	__u32 len;
+	__u16 group;
+	__u16 threshold;
+	__u16 flags;
+	__u16 pad;
 	char prefix[EBT_NFLOG_PREFIX_SIZE];
 };
 
diff --git a/include/linux/netfilter_bridge/ebt_pkttype.h b/include/linux/netfilter_bridge/ebt_pkttype.h
index 51a7998..c241bad 100644
--- a/include/linux/netfilter_bridge/ebt_pkttype.h
+++ b/include/linux/netfilter_bridge/ebt_pkttype.h
@@ -1,9 +1,11 @@
 #ifndef __LINUX_BRIDGE_EBT_PKTTYPE_H
 #define __LINUX_BRIDGE_EBT_PKTTYPE_H
 
+#include <linux/types.h>
+
 struct ebt_pkttype_info {
-	uint8_t pkt_type;
-	uint8_t invert;
+	__u8 pkt_type;
+	__u8 invert;
 };
 #define EBT_PKTTYPE_MATCH "pkttype"
 
diff --git a/include/linux/netfilter_bridge/ebt_stp.h b/include/linux/netfilter_bridge/ebt_stp.h
index e503a0a..1025b9f 100644
--- a/include/linux/netfilter_bridge/ebt_stp.h
+++ b/include/linux/netfilter_bridge/ebt_stp.h
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_STP_H
 #define __LINUX_BRIDGE_EBT_STP_H
 
+#include <linux/types.h>
+
 #define EBT_STP_TYPE		0x0001
 
 #define EBT_STP_FLAGS		0x0002
@@ -21,24 +23,24 @@
 #define EBT_STP_MATCH "stp"
 
 struct ebt_stp_config_info {
-	uint8_t flags;
-	uint16_t root_priol, root_priou;
+	__u8 flags;
+	__u16 root_priol, root_priou;
 	char root_addr[6], root_addrmsk[6];
-	uint32_t root_costl, root_costu;
-	uint16_t sender_priol, sender_priou;
+	__u32 root_costl, root_costu;
+	__u16 sender_priol, sender_priou;
 	char sender_addr[6], sender_addrmsk[6];
-	uint16_t portl, portu;
-	uint16_t msg_agel, msg_ageu;
-	uint16_t max_agel, max_ageu;
-	uint16_t hello_timel, hello_timeu;
-	uint16_t forward_delayl, forward_delayu;
+	__u16 portl, portu;
+	__u16 msg_agel, msg_ageu;
+	__u16 max_agel, max_ageu;
+	__u16 hello_timel, hello_timeu;
+	__u16 forward_delayl, forward_delayu;
 };
 
 struct ebt_stp_info {
-	uint8_t type;
+	__u8 type;
 	struct ebt_stp_config_info config;
-	uint16_t bitmask;
-	uint16_t invflags;
+	__u16 bitmask;
+	__u16 invflags;
 };
 
 #endif
diff --git a/include/linux/netfilter_bridge/ebt_ulog.h b/include/linux/netfilter_bridge/ebt_ulog.h
index b677e26..89a6bec 100644
--- a/include/linux/netfilter_bridge/ebt_ulog.h
+++ b/include/linux/netfilter_bridge/ebt_ulog.h
@@ -1,6 +1,8 @@
 #ifndef _EBT_ULOG_H
 #define _EBT_ULOG_H
 
+#include <linux/types.h>
+
 #define EBT_ULOG_DEFAULT_NLGROUP 0
 #define EBT_ULOG_DEFAULT_QTHRESHOLD 1
 #define EBT_ULOG_MAXNLGROUPS 32 /* hardcoded netlink max */
@@ -10,7 +12,7 @@
 #define EBT_ULOG_VERSION 1
 
 struct ebt_ulog_info {
-	uint32_t nlgroup;
+	__u32 nlgroup;
 	unsigned int cprange;
 	unsigned int qthreshold;
 	char prefix[EBT_ULOG_PREFIX_LEN];
diff --git a/include/linux/netfilter_bridge/ebt_vlan.h b/include/linux/netfilter_bridge/ebt_vlan.h
index 1d98be4..967d1d5 100644
--- a/include/linux/netfilter_bridge/ebt_vlan.h
+++ b/include/linux/netfilter_bridge/ebt_vlan.h
@@ -1,6 +1,8 @@
 #ifndef __LINUX_BRIDGE_EBT_VLAN_H
 #define __LINUX_BRIDGE_EBT_VLAN_H
 
+#include <linux/types.h>
+
 #define EBT_VLAN_ID	0x01
 #define EBT_VLAN_PRIO	0x02
 #define EBT_VLAN_ENCAP	0x04
@@ -8,12 +10,12 @@
 #define EBT_VLAN_MATCH "vlan"
 
 struct ebt_vlan_info {
-	uint16_t id;		/* VLAN ID {1-4095} */
-	uint8_t prio;		/* VLAN User Priority {0-7} */
+	__u16 id;		/* VLAN ID {1-4095} */
+	__u8 prio;		/* VLAN User Priority {0-7} */
 	__be16 encap;		/* VLAN Encapsulated frame code {0-65535} */
-	uint8_t bitmask;		/* Args bitmask bit 1=1 - ID arg,
+	__u8 bitmask;		/* Args bitmask bit 1=1 - ID arg,
 				   bit 2=1 User-Priority arg, bit 3=1 encap*/
-	uint8_t invflags;		/* Inverse bitmask  bit 1=1 - inversed ID arg, 
+	__u8 invflags;		/* Inverse bitmask  bit 1=1 - inverted ID arg,
 				   bit 2=1 - inverted Priority arg */
 };
 
diff --git a/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h b/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
index e5a3687..c6a204c 100644
--- a/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
+++ b/include/linux/netfilter_ipv4/ipt_CLUSTERIP.h
@@ -1,6 +1,8 @@
 #ifndef _IPT_CLUSTERIP_H_target
 #define _IPT_CLUSTERIP_H_target
 
+#include <linux/types.h>
+
 enum clusterip_hashmode {
     CLUSTERIP_HASHMODE_SIP = 0,
     CLUSTERIP_HASHMODE_SIP_SPT,
@@ -17,15 +19,15 @@
 
 struct ipt_clusterip_tgt_info {
 
-	u_int32_t flags;
+	__u32 flags;
 
 	/* only relevant for new ones */
-	u_int8_t clustermac[6];
-	u_int16_t num_total_nodes;
-	u_int16_t num_local_nodes;
-	u_int16_t local_nodes[CLUSTERIP_MAX_NODES];
-	u_int32_t hash_mode;
-	u_int32_t hash_initval;
+	__u8 clustermac[6];
+	__u16 num_total_nodes;
+	__u16 num_local_nodes;
+	__u16 local_nodes[CLUSTERIP_MAX_NODES];
+	__u32 hash_mode;
+	__u32 hash_initval;
 
 	/* Used internally by the kernel */
 	struct clusterip_config *config;
diff --git a/include/linux/netfilter_ipv4/ipt_ECN.h b/include/linux/netfilter_ipv4/ipt_ECN.h
index 7ca4591..bb88d53 100644
--- a/include/linux/netfilter_ipv4/ipt_ECN.h
+++ b/include/linux/netfilter_ipv4/ipt_ECN.h
@@ -8,6 +8,8 @@
 */
 #ifndef _IPT_ECN_TARGET_H
 #define _IPT_ECN_TARGET_H
+
+#include <linux/types.h>
 #include <linux/netfilter/xt_DSCP.h>
 
 #define IPT_ECN_IP_MASK	(~XT_DSCP_MASK)
@@ -19,11 +21,11 @@
 #define IPT_ECN_OP_MASK		0xce
 
 struct ipt_ECN_info {
-	u_int8_t operation;	/* bitset of operations */
-	u_int8_t ip_ect;	/* ECT codepoint of IPv4 header, pre-shifted */
+	__u8 operation;	/* bitset of operations */
+	__u8 ip_ect;	/* ECT codepoint of IPv4 header, pre-shifted */
 	union {
 		struct {
-			u_int8_t ece:1, cwr:1; /* TCP ECT bits */
+			__u8 ece:1, cwr:1; /* TCP ECT bits */
 		} tcp;
 	} proto;
 };
diff --git a/include/linux/netfilter_ipv4/ipt_SAME.h b/include/linux/netfilter_ipv4/ipt_SAME.h
index 2529660..5bca782 100644
--- a/include/linux/netfilter_ipv4/ipt_SAME.h
+++ b/include/linux/netfilter_ipv4/ipt_SAME.h
@@ -1,15 +1,17 @@
 #ifndef _IPT_SAME_H
 #define _IPT_SAME_H
 
+#include <linux/types.h>
+
 #define IPT_SAME_MAX_RANGE	10
 
 #define IPT_SAME_NODST		0x01
 
 struct ipt_same_info {
 	unsigned char info;
-	u_int32_t rangesize;
-	u_int32_t ipnum;
-	u_int32_t *iparray;
+	__u32 rangesize;
+	__u32 ipnum;
+	__u32 *iparray;
 
 	/* hangs off end. */
 	struct nf_nat_range range[IPT_SAME_MAX_RANGE];
diff --git a/include/linux/netfilter_ipv4/ipt_TTL.h b/include/linux/netfilter_ipv4/ipt_TTL.h
index ee6611e..f6ac169 100644
--- a/include/linux/netfilter_ipv4/ipt_TTL.h
+++ b/include/linux/netfilter_ipv4/ipt_TTL.h
@@ -4,6 +4,8 @@
 #ifndef _IPT_TTL_H
 #define _IPT_TTL_H
 
+#include <linux/types.h>
+
 enum {
 	IPT_TTL_SET = 0,
 	IPT_TTL_INC,
@@ -13,8 +15,8 @@
 #define IPT_TTL_MAXMODE	IPT_TTL_DEC
 
 struct ipt_TTL_info {
-	u_int8_t	mode;
-	u_int8_t	ttl;
+	__u8	mode;
+	__u8	ttl;
 };
 
 
diff --git a/include/linux/netfilter_ipv4/ipt_addrtype.h b/include/linux/netfilter_ipv4/ipt_addrtype.h
index 446de6a..0da4223 100644
--- a/include/linux/netfilter_ipv4/ipt_addrtype.h
+++ b/include/linux/netfilter_ipv4/ipt_addrtype.h
@@ -1,6 +1,8 @@
 #ifndef _IPT_ADDRTYPE_H
 #define _IPT_ADDRTYPE_H
 
+#include <linux/types.h>
+
 enum {
 	IPT_ADDRTYPE_INVERT_SOURCE	= 0x0001,
 	IPT_ADDRTYPE_INVERT_DEST	= 0x0002,
@@ -9,17 +11,17 @@
 };
 
 struct ipt_addrtype_info_v1 {
-	u_int16_t	source;		/* source-type mask */
-	u_int16_t	dest;		/* dest-type mask */
-	u_int32_t	flags;
+	__u16	source;		/* source-type mask */
+	__u16	dest;		/* dest-type mask */
+	__u32	flags;
 };
 
 /* revision 0 */
 struct ipt_addrtype_info {
-	u_int16_t	source;		/* source-type mask */
-	u_int16_t	dest;		/* dest-type mask */
-	u_int32_t	invert_source;
-	u_int32_t	invert_dest;
+	__u16	source;		/* source-type mask */
+	__u16	dest;		/* dest-type mask */
+	__u32	invert_source;
+	__u32	invert_dest;
 };
 
 #endif
diff --git a/include/linux/netfilter_ipv4/ipt_ah.h b/include/linux/netfilter_ipv4/ipt_ah.h
index 2e555b4..4e02bb0 100644
--- a/include/linux/netfilter_ipv4/ipt_ah.h
+++ b/include/linux/netfilter_ipv4/ipt_ah.h
@@ -1,9 +1,11 @@
 #ifndef _IPT_AH_H
 #define _IPT_AH_H
 
+#include <linux/types.h>
+
 struct ipt_ah {
-	u_int32_t spis[2];			/* Security Parameter Index */
-	u_int8_t  invflags;			/* Inverse flags */
+	__u32 spis[2];			/* Security Parameter Index */
+	__u8  invflags;			/* Inverse flags */
 };
 
 
diff --git a/include/linux/netfilter_ipv4/ipt_ecn.h b/include/linux/netfilter_ipv4/ipt_ecn.h
index 9945baa..eabf95f 100644
--- a/include/linux/netfilter_ipv4/ipt_ecn.h
+++ b/include/linux/netfilter_ipv4/ipt_ecn.h
@@ -8,6 +8,8 @@
 */
 #ifndef _IPT_ECN_H
 #define _IPT_ECN_H
+
+#include <linux/types.h>
 #include <linux/netfilter/xt_dscp.h>
 
 #define IPT_ECN_IP_MASK	(~XT_DSCP_MASK)
@@ -20,12 +22,12 @@
 
 /* match info */
 struct ipt_ecn_info {
-	u_int8_t operation;
-	u_int8_t invert;
-	u_int8_t ip_ect;
+	__u8 operation;
+	__u8 invert;
+	__u8 ip_ect;
 	union {
 		struct {
-			u_int8_t ect;
+			__u8 ect;
 		} tcp;
 	} proto;
 };
diff --git a/include/linux/netfilter_ipv4/ipt_ttl.h b/include/linux/netfilter_ipv4/ipt_ttl.h
index ee24fd8..37bee44 100644
--- a/include/linux/netfilter_ipv4/ipt_ttl.h
+++ b/include/linux/netfilter_ipv4/ipt_ttl.h
@@ -4,6 +4,8 @@
 #ifndef _IPT_TTL_H
 #define _IPT_TTL_H
 
+#include <linux/types.h>
+
 enum {
 	IPT_TTL_EQ = 0,		/* equals */
 	IPT_TTL_NE,		/* not equals */
@@ -13,8 +15,8 @@
 
 
 struct ipt_ttl_info {
-	u_int8_t	mode;
-	u_int8_t	ttl;
+	__u8	mode;
+	__u8	ttl;
 };
 
 
diff --git a/include/linux/netfilter_ipv6/ip6t_HL.h b/include/linux/netfilter_ipv6/ip6t_HL.h
index afb7813..ebd8ead 100644
--- a/include/linux/netfilter_ipv6/ip6t_HL.h
+++ b/include/linux/netfilter_ipv6/ip6t_HL.h
@@ -5,6 +5,8 @@
 #ifndef _IP6T_HL_H
 #define _IP6T_HL_H
 
+#include <linux/types.h>
+
 enum {
 	IP6T_HL_SET = 0,
 	IP6T_HL_INC,
@@ -14,8 +16,8 @@
 #define IP6T_HL_MAXMODE	IP6T_HL_DEC
 
 struct ip6t_HL_info {
-	u_int8_t	mode;
-	u_int8_t	hop_limit;
+	__u8	mode;
+	__u8	hop_limit;
 };
 
 
diff --git a/include/linux/netfilter_ipv6/ip6t_REJECT.h b/include/linux/netfilter_ipv6/ip6t_REJECT.h
index 6be6504..205ed62 100644
--- a/include/linux/netfilter_ipv6/ip6t_REJECT.h
+++ b/include/linux/netfilter_ipv6/ip6t_REJECT.h
@@ -1,6 +1,8 @@
 #ifndef _IP6T_REJECT_H
 #define _IP6T_REJECT_H
 
+#include <linux/types.h>
+
 enum ip6t_reject_with {
 	IP6T_ICMP6_NO_ROUTE,
 	IP6T_ICMP6_ADM_PROHIBITED,
@@ -12,7 +14,7 @@
 };
 
 struct ip6t_reject_info {
-	u_int32_t	with;	/* reject type */
+	__u32	with;	/* reject type */
 };
 
 #endif /*_IP6T_REJECT_H*/
diff --git a/include/linux/netfilter_ipv6/ip6t_ah.h b/include/linux/netfilter_ipv6/ip6t_ah.h
index 17a745c..5da2b65 100644
--- a/include/linux/netfilter_ipv6/ip6t_ah.h
+++ b/include/linux/netfilter_ipv6/ip6t_ah.h
@@ -1,11 +1,13 @@
 #ifndef _IP6T_AH_H
 #define _IP6T_AH_H
 
+#include <linux/types.h>
+
 struct ip6t_ah {
-	u_int32_t spis[2];			/* Security Parameter Index */
-	u_int32_t hdrlen;			/* Header Length */
-	u_int8_t  hdrres;			/* Test of the Reserved Filed */
-	u_int8_t  invflags;			/* Inverse flags */
+	__u32 spis[2];			/* Security Parameter Index */
+	__u32 hdrlen;			/* Header Length */
+	__u8  hdrres;			/* Test of the Reserved Field */
+	__u8  invflags;			/* Inverse flags */
 };
 
 #define IP6T_AH_SPI 0x01
diff --git a/include/linux/netfilter_ipv6/ip6t_frag.h b/include/linux/netfilter_ipv6/ip6t_frag.h
index 3724d08..b47f61b 100644
--- a/include/linux/netfilter_ipv6/ip6t_frag.h
+++ b/include/linux/netfilter_ipv6/ip6t_frag.h
@@ -1,11 +1,13 @@
 #ifndef _IP6T_FRAG_H
 #define _IP6T_FRAG_H
 
+#include <linux/types.h>
+
 struct ip6t_frag {
-	u_int32_t ids[2];			/* Security Parameter Index */
-	u_int32_t hdrlen;			/* Header Length */
-	u_int8_t  flags;			/*  */
-	u_int8_t  invflags;			/* Inverse flags */
+	__u32 ids[2];			/* Security Parameter Index */
+	__u32 hdrlen;			/* Header Length */
+	__u8  flags;			/*  */
+	__u8  invflags;			/* Inverse flags */
 };
 
 #define IP6T_FRAG_IDS 		0x01
diff --git a/include/linux/netfilter_ipv6/ip6t_hl.h b/include/linux/netfilter_ipv6/ip6t_hl.h
index 5ef91b8..6e76dbc 100644
--- a/include/linux/netfilter_ipv6/ip6t_hl.h
+++ b/include/linux/netfilter_ipv6/ip6t_hl.h
@@ -5,6 +5,8 @@
 #ifndef _IP6T_HL_H
 #define _IP6T_HL_H
 
+#include <linux/types.h>
+
 enum {
 	IP6T_HL_EQ = 0,		/* equals */
 	IP6T_HL_NE,		/* not equals */
@@ -14,8 +16,8 @@
 
 
 struct ip6t_hl_info {
-	u_int8_t	mode;
-	u_int8_t	hop_limit;
+	__u8	mode;
+	__u8	hop_limit;
 };
 
 
diff --git a/include/linux/netfilter_ipv6/ip6t_ipv6header.h b/include/linux/netfilter_ipv6/ip6t_ipv6header.h
index 01dfd44..efae3a2 100644
--- a/include/linux/netfilter_ipv6/ip6t_ipv6header.h
+++ b/include/linux/netfilter_ipv6/ip6t_ipv6header.h
@@ -8,10 +8,12 @@
 #ifndef __IPV6HEADER_H
 #define __IPV6HEADER_H
 
+#include <linux/types.h>
+
 struct ip6t_ipv6header_info {
-	u_int8_t matchflags;
-	u_int8_t invflags;
-	u_int8_t modeflag;
+	__u8 matchflags;
+	__u8 invflags;
+	__u8 modeflag;
 };
 
 #define MASK_HOPOPTS    128
diff --git a/include/linux/netfilter_ipv6/ip6t_mh.h b/include/linux/netfilter_ipv6/ip6t_mh.h
index 18549bc..a7729a5 100644
--- a/include/linux/netfilter_ipv6/ip6t_mh.h
+++ b/include/linux/netfilter_ipv6/ip6t_mh.h
@@ -1,10 +1,12 @@
 #ifndef _IP6T_MH_H
 #define _IP6T_MH_H
 
+#include <linux/types.h>
+
 /* MH matching stuff */
 struct ip6t_mh {
-	u_int8_t types[2];	/* MH type range */
-	u_int8_t invflags;	/* Inverse flags */
+	__u8 types[2];	/* MH type range */
+	__u8 invflags;	/* Inverse flags */
 };
 
 /* Values for "invflags" field in struct ip6t_mh. */
diff --git a/include/linux/netfilter_ipv6/ip6t_opts.h b/include/linux/netfilter_ipv6/ip6t_opts.h
index 62d89bc..17d419a 100644
--- a/include/linux/netfilter_ipv6/ip6t_opts.h
+++ b/include/linux/netfilter_ipv6/ip6t_opts.h
@@ -1,14 +1,16 @@
 #ifndef _IP6T_OPTS_H
 #define _IP6T_OPTS_H
 
+#include <linux/types.h>
+
 #define IP6T_OPTS_OPTSNR 16
 
 struct ip6t_opts {
-	u_int32_t hdrlen;			/* Header Length */
-	u_int8_t flags;				/*  */
-	u_int8_t invflags;			/* Inverse flags */
-	u_int16_t opts[IP6T_OPTS_OPTSNR];	/* opts */
-	u_int8_t optsnr;			/* Nr of OPts */
+	__u32 hdrlen;			/* Header Length */
+	__u8 flags;				/*  */
+	__u8 invflags;			/* Inverse flags */
+	__u16 opts[IP6T_OPTS_OPTSNR];	/* opts */
+	__u8 optsnr;			/* Nr of OPts */
 };
 
 #define IP6T_OPTS_LEN 		0x01
diff --git a/include/linux/netfilter_ipv6/ip6t_rt.h b/include/linux/netfilter_ipv6/ip6t_rt.h
index ab91bfd..7605a5f 100644
--- a/include/linux/netfilter_ipv6/ip6t_rt.h
+++ b/include/linux/netfilter_ipv6/ip6t_rt.h
@@ -1,18 +1,19 @@
 #ifndef _IP6T_RT_H
 #define _IP6T_RT_H
 
+#include <linux/types.h>
 /*#include <linux/in6.h>*/
 
 #define IP6T_RT_HOPS 16
 
 struct ip6t_rt {
-	u_int32_t rt_type;			/* Routing Type */
-	u_int32_t segsleft[2];			/* Segments Left */
-	u_int32_t hdrlen;			/* Header Length */
-	u_int8_t  flags;			/*  */
-	u_int8_t  invflags;			/* Inverse flags */
+	__u32 rt_type;			/* Routing Type */
+	__u32 segsleft[2];			/* Segments Left */
+	__u32 hdrlen;			/* Header Length */
+	__u8  flags;			/*  */
+	__u8  invflags;			/* Inverse flags */
 	struct in6_addr addrs[IP6T_RT_HOPS];	/* Hops */
-	u_int8_t addrnr;			/* Nr of Addresses */
+	__u8 addrnr;			/* Nr of Addresses */
 };
 
 #define IP6T_RT_TYP 		0x01
diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h
index f321b57..fabcb1e 100644
--- a/include/linux/nfsacl.h
+++ b/include/linux/nfsacl.h
@@ -51,10 +51,10 @@
 	return w;
 }
 
-extern unsigned int
+extern int
 nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
 	      struct posix_acl *acl, int encode_entries, int typeflag);
-extern unsigned int
+extern int
 nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
 	      struct posix_acl **pacl);
 
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 32fb812..1ca6411 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -16,6 +16,8 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
 #include <asm/atomic.h>
  
 /* Each escaped entry is prefixed by ESCAPE_CODE
@@ -186,10 +188,17 @@
 int oprofile_add_data64(struct op_entry *entry, u64 val);
 int oprofile_write_commit(struct op_entry *entry);
 
-#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_HW_PERF_EVENTS
 int __init oprofile_perf_init(struct oprofile_operations *ops);
 void oprofile_perf_exit(void);
 char *op_name_from_perf_id(void);
-#endif /* CONFIG_PERF_EVENTS */
+#else
+static inline int __init oprofile_perf_init(struct oprofile_operations *ops)
+{
+	pr_info("oprofile: hardware counters not available\n");
+	return -ENODEV;
+}
+static inline void oprofile_perf_exit(void) { }
+#endif /* CONFIG_HW_PERF_EVENTS */
 
 #endif /* OPROFILE_H */
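The new #else branch gives oprofile_perf_init()/oprofile_perf_exit() inline fallbacks when CONFIG_HW_PERF_EVENTS is off, so architecture glue can call them unconditionally. A minimal sketch of such a caller, assuming the oprofile_arch_init()/oprofile_arch_exit() hooks used by perf-based ports; the surrounding code is illustrative, not part of this patch:

/* Hedged sketch: arch glue can call the perf-based helpers directly;
 * without CONFIG_HW_PERF_EVENTS the stub above logs a message and
 * returns -ENODEV, so no #ifdef is needed at the call site. */
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
	return oprofile_perf_init(ops);
}

void oprofile_arch_exit(void)
{
	oprofile_perf_exit();
}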
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 2cfa4bc..d4bb6f5 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -247,6 +247,35 @@
 	__u16		pad1;
 };
 
+/* CHOKe section */
+
+enum {
+	TCA_CHOKE_UNSPEC,
+	TCA_CHOKE_PARMS,
+	TCA_CHOKE_STAB,
+	__TCA_CHOKE_MAX,
+};
+
+#define TCA_CHOKE_MAX (__TCA_CHOKE_MAX - 1)
+
+struct tc_choke_qopt {
+	__u32		limit;		/* Hard queue length (packets)	*/
+	__u32		qth_min;	/* Min average threshold (packets) */
+	__u32		qth_max;	/* Max average threshold (packets) */
+	unsigned char   Wlog;		/* log(W)		*/
+	unsigned char   Plog;		/* log(P_max/(qth_max-qth_min))	*/
+	unsigned char   Scell_log;	/* cell size for idle damping */
+	unsigned char	flags;		/* see RED flags */
+};
+
+struct tc_choke_xstats {
+	__u32		early;          /* Early drops */
+	__u32		pdrop;          /* Drops due to queue limits */
+	__u32		other;          /* Drops due to drop() calls */
+	__u32		marked;         /* Marked packets */
+	__u32		matched;	/* Drops due to flow match */
+};
+
 /* HTB section */
 #define TC_HTB_NUMPRIO		8
 #define TC_HTB_MAXDEPTH		8
@@ -481,4 +510,16 @@
 	__u32	deficit;
 };
 
+/* MQPRIO */
+#define TC_QOPT_BITMASK 15
+#define TC_QOPT_MAX_QUEUE 16
+
+struct tc_mqprio_qopt {
+	__u8	num_tc;
+	__u8	prio_tc_map[TC_QOPT_BITMASK + 1];
+	__u8	hw;
+	__u16	count[TC_QOPT_MAX_QUEUE];
+	__u16	offset[TC_QOPT_MAX_QUEUE];
+};
+
 #endif
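struct tc_mqprio_qopt describes how the 16 skb priorities map onto traffic classes and how each class maps onto a contiguous range of hardware queues. A hedged sketch of filling it for four classes over eight queues; every value below is illustrative:

/* Hedged sketch: populate tc_mqprio_qopt for 4 traffic classes over 8 queues.
 * The priority map and queue ranges are made-up example values. */
static struct tc_mqprio_qopt example_qopt = {
	.num_tc = 4,
	/* map the 16 skb priorities onto the 4 classes */
	.prio_tc_map = { 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 },
	.hw = 1,			/* ask the driver to offload the mapping */
	.count  = { 2, 2, 2, 2 },	/* queues per traffic class */
	.offset = { 0, 2, 4, 6 },	/* first queue of each class */
};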
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
index d68283a..54211c1 100644
--- a/include/linux/posix_acl.h
+++ b/include/linux/posix_acl.h
@@ -71,6 +71,7 @@
 
 /* posix_acl.c */
 
+extern void posix_acl_init(struct posix_acl *, int);
 extern struct posix_acl *posix_acl_alloc(int, gfp_t);
 extern struct posix_acl *posix_acl_clone(const struct posix_acl *, gfp_t);
 extern int posix_acl_valid(const struct posix_acl *);
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 94c1f03..9a85412 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -322,9 +322,12 @@
 	qsize_t *(*get_reserved_space) (struct inode *);
 };
 
+struct path;
+
 /* Operations handling requests from userspace */
 struct quotactl_ops {
-	int (*quota_on)(struct super_block *, int, int, char *);
+	int (*quota_on)(struct super_block *, int, int, struct path *);
+	int (*quota_on_meta)(struct super_block *, int, int);
 	int (*quota_off)(struct super_block *, int);
 	int (*quota_sync)(struct super_block *, int, int);
 	int (*get_info)(struct super_block *, int, struct if_dqinfo *);
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 223b14c..eb354f6 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -76,11 +76,9 @@
 
 int dquot_file_open(struct inode *inode, struct file *file);
 
-int dquot_quota_on(struct super_block *sb, int type, int format_id,
-	char *path);
 int dquot_enable(struct inode *inode, int type, int format_id,
 	unsigned int flags);
-int dquot_quota_on_path(struct super_block *sb, int type, int format_id,
+int dquot_quota_on(struct super_block *sb, int type, int format_id,
  	struct path *path);
 int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
  	int format_id, int type);
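quota_on in struct quotactl_ops now receives a resolved struct path instead of a pathname string, and the path-based generic helper takes over the dquot_quota_on() name. A hedged sketch of a filesystem plugging the generic helpers into its quotactl_ops; the struct name is illustrative and the remaining callbacks are elided:

/* Hedged sketch: wire the path-based generic helper into quotactl_ops.
 * "example_quotactl_ops" is an illustrative name, not from this patch. */
static const struct quotactl_ops example_quotactl_ops = {
	.quota_on	= dquot_quota_on,	/* now takes a struct path * */
	.quota_off	= dquot_quota_off,
	.quota_sync	= dquot_quota_sync,
};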
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index fcb9884..a5930cb 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -182,6 +182,26 @@
 	return ret;
 }
 
+/**
+ * res_counter_check_margin - check if the counter allows charging
+ * @cnt: the resource counter to check
+ * @bytes: the number of bytes to check the remaining space against
+ *
+ * Returns true if @bytes can still be charged to the counter without
+ * exceeding its limit, false otherwise.
+ */
+static inline bool res_counter_check_margin(struct res_counter *cnt,
+					    unsigned long bytes)
+{
+	bool ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cnt->lock, flags);
+	ret = cnt->limit - cnt->usage >= bytes;
+	spin_unlock_irqrestore(&cnt->lock, flags);
+	return ret;
+}
+
 static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt)
 {
 	bool ret;
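res_counter_check_margin() samples limit - usage under the counter lock and reports whether a charge of @bytes would still fit. A hedged sketch of a caller; try_charge_bytes() is a hypothetical helper, not a kernel API, and the actual charge step is elided:

/* Hedged sketch: use the margin check to decide whether a charge of
 * "bytes" could succeed without hitting the limit. */
static int try_charge_bytes(struct res_counter *cnt, unsigned long bytes)
{
	if (!res_counter_check_margin(cnt, bytes))
		return -ENOMEM;		/* would exceed the limit, back off */

	/* ... proceed with the real res_counter charge here ... */
	return 0;
}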
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 3c995b4..89c3e51 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -203,6 +203,18 @@
 	struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
 	int pie_enabled;
 	struct work_struct irqwork;
+
+
+#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
+	struct work_struct uie_task;
+	struct timer_list uie_timer;
+	/* Those fields are protected by rtc->irq_lock */
+	unsigned int oldsecs;
+	unsigned int uie_irq_active:1;
+	unsigned int stop_uie_polling:1;
+	unsigned int uie_task_active:1;
+	unsigned int uie_timer_active:1;
+#endif
 };
 #define to_rtc_device(d) container_of(d, struct rtc_device, dev)
 
@@ -238,6 +250,7 @@
 extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc,
 						unsigned int enabled);
 
+void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode);
 void rtc_aie_update_irq(void *private);
 void rtc_uie_update_irq(void *private);
 enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer);
@@ -246,8 +259,6 @@
 int rtc_unregister(rtc_task_t *task);
 int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
 
-void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
-void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
 void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data);
 int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
 			ktime_t expires, ktime_t period);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d747f94..777d8a5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1744,7 +1744,7 @@
 #define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
-#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
+#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
 #define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */
 
 /*
diff --git a/include/linux/security.h b/include/linux/security.h
index c642bb8..b2b7f97 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1662,7 +1662,7 @@
 		    const kernel_cap_t *effective,
 		    const kernel_cap_t *inheritable,
 		    const kernel_cap_t *permitted);
-int security_capable(int cap);
+int security_capable(const struct cred *cred, int cap);
 int security_real_capable(struct task_struct *tsk, int cap);
 int security_real_capable_noaudit(struct task_struct *tsk, int cap);
 int security_sysctl(struct ctl_table *table, int op);
@@ -1856,9 +1856,9 @@
 	return cap_capset(new, old, effective, inheritable, permitted);
 }
 
-static inline int security_capable(int cap)
+static inline int security_capable(const struct cred *cred, int cap)
 {
-	return cap_capable(current, current_cred(), cap, SECURITY_CAP_AUDIT);
+	return cap_capable(current, cred, cap, SECURITY_CAP_AUDIT);
 }
 
 static inline int security_real_capable(struct task_struct *tsk, int cap)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bf221d6..31f02d0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1801,6 +1801,15 @@
 		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
 		     skb = skb->prev)
 
+#define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
+		for (skb = (queue)->prev, tmp = skb->prev;			\
+		     skb != (struct sk_buff *)(queue);				\
+		     skb = tmp, tmp = skb->prev)
+
+#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
+		for (tmp = skb->prev;						\
+		     skb != (struct sk_buff *)(queue);				\
+		     skb = tmp, tmp = skb->prev)
 
 static inline bool skb_has_frag_list(const struct sk_buff *skb)
 {
@@ -1868,7 +1877,7 @@
 extern int	       skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
 				 int shiftlen);
 
-extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);
 
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
 				       int len, void *buffer)
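The new _safe variants save the previous pointer before the loop body runs, so the current skb may be unlinked and freed while walking the queue tail-first. A minimal usage sketch, assuming the caller already holds the appropriate queue lock; drop_last_n() is a hypothetical helper:

/* Hedged sketch: unlink and free the last n skbs of a queue, walking from
 * the tail.  Safe because tmp already holds skb->prev when the body runs.
 * Caller must hold the queue lock for the __skb_unlink() calls. */
static void drop_last_n(struct sk_buff_head *queue, int n)
{
	struct sk_buff *skb, *tmp;

	skb_queue_reverse_walk_safe(queue, skb, tmp) {
		if (n-- <= 0)
			break;
		__skb_unlink(skb, queue);
		kfree_skb(skb);
	}
}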
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index c50b458..0828842 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -47,14 +47,6 @@
 		return 1;
 	return 0;
 }
-static inline struct nfs4_sessionid *bc_xprt_sid(struct svc_rqst *rqstp)
-{
-	if (svc_is_backchannel(rqstp))
-		return (struct nfs4_sessionid *)
-			rqstp->rq_server->sv_bc_xprt->xpt_bc_sid;
-	return NULL;
-}
-
 #else /* CONFIG_NFS_V4_1 */
 static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
 					 unsigned int min_reqs)
@@ -67,11 +59,6 @@
 	return 0;
 }
 
-static inline struct nfs4_sessionid *bc_xprt_sid(struct svc_rqst *rqstp)
-{
-	return NULL;
-}
-
 static inline void xprt_free_bc_request(struct rpc_rqst *req)
 {
 }
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 059877b..7ad9751 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -77,7 +77,6 @@
 	size_t			xpt_remotelen;	/* length of address */
 	struct rpc_wait_queue	xpt_bc_pending;	/* backchannel wait queue */
 	struct list_head	xpt_users;	/* callbacks on free */
-	void			*xpt_bc_sid;	/* back channel session ID */
 
 	struct net		*xpt_net;
 	struct rpc_xprt		*xpt_bc_xprt;	/* NFSv4.1 backchannel */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 18cd068..98664db 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -125,39 +125,37 @@
 extern struct trace_event_functions exit_syscall_print_funcs;
 
 #define SYSCALL_TRACE_ENTER_EVENT(sname)				\
-	static struct syscall_metadata					\
-	__attribute__((__aligned__(4))) __syscall_meta_##sname;		\
+	static struct syscall_metadata __syscall_meta_##sname;		\
 	static struct ftrace_event_call __used				\
-	  __attribute__((__aligned__(4)))				\
-	  __attribute__((section("_ftrace_events")))			\
 	  event_enter_##sname = {					\
 		.name                   = "sys_enter"#sname,		\
 		.class			= &event_class_syscall_enter,	\
 		.event.funcs            = &enter_syscall_print_funcs,	\
 		.data			= (void *)&__syscall_meta_##sname,\
 	};								\
+	static struct ftrace_event_call __used				\
+	  __attribute__((section("_ftrace_events")))			\
+	 *__event_enter_##sname = &event_enter_##sname;			\
 	__TRACE_EVENT_FLAGS(enter_##sname, TRACE_EVENT_FL_CAP_ANY)
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname)					\
-	static struct syscall_metadata					\
-	__attribute__((__aligned__(4))) __syscall_meta_##sname;		\
+	static struct syscall_metadata __syscall_meta_##sname;		\
 	static struct ftrace_event_call __used				\
-	  __attribute__((__aligned__(4)))				\
-	  __attribute__((section("_ftrace_events")))			\
 	  event_exit_##sname = {					\
 		.name                   = "sys_exit"#sname,		\
 		.class			= &event_class_syscall_exit,	\
 		.event.funcs		= &exit_syscall_print_funcs,	\
 		.data			= (void *)&__syscall_meta_##sname,\
 	};								\
+	static struct ftrace_event_call __used				\
+	  __attribute__((section("_ftrace_events")))			\
+	*__event_exit_##sname = &event_exit_##sname;			\
 	__TRACE_EVENT_FLAGS(exit_##sname, TRACE_EVENT_FL_CAP_ANY)
 
 #define SYSCALL_METADATA(sname, nb)				\
 	SYSCALL_TRACE_ENTER_EVENT(sname);			\
 	SYSCALL_TRACE_EXIT_EVENT(sname);			\
 	static struct syscall_metadata __used			\
-	  __attribute__((__aligned__(4)))			\
-	  __attribute__((section("__syscalls_metadata")))	\
 	  __syscall_meta_##sname = {				\
 		.name 		= "sys"#sname,			\
 		.nb_args 	= nb,				\
@@ -166,14 +164,15 @@
 		.enter_event	= &event_enter_##sname,		\
 		.exit_event	= &event_exit_##sname,		\
 		.enter_fields	= LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
-	};
+	};							\
+	static struct syscall_metadata __used			\
+	  __attribute__((section("__syscalls_metadata")))	\
+	 *__p_syscall_meta_##sname = &__syscall_meta_##sname;
 
 #define SYSCALL_DEFINE0(sname)					\
 	SYSCALL_TRACE_ENTER_EVENT(_##sname);			\
 	SYSCALL_TRACE_EXIT_EVENT(_##sname);			\
 	static struct syscall_metadata __used			\
-	  __attribute__((__aligned__(4)))			\
-	  __attribute__((section("__syscalls_metadata")))	\
 	  __syscall_meta__##sname = {				\
 		.name 		= "sys_"#sname,			\
 		.nb_args 	= 0,				\
@@ -181,6 +180,9 @@
 		.exit_event	= &event_exit__##sname,		\
 		.enter_fields	= LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
 	};							\
+	static struct syscall_metadata __used			\
+	  __attribute__((section("__syscalls_metadata")))	\
+	 *__p_syscall_meta_##sname = &__syscall_meta__##sname;	\
 	asmlinkage long sys_##sname(void)
 #else
 #define SYSCALL_DEFINE0(name)	   asmlinkage long sys_##name(void)
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 387fa7d..7faf933 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -17,6 +17,9 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 
+/* Enable/disable SYSRQ support by default (0==no, 1==yes). */
+#define SYSRQ_DEFAULT_ENABLE	1
+
 /* Possible values of bitmask for enabling sysrq functions */
 /* 0x0001 is reserved for enable everything */
 #define SYSRQ_ENABLE_LOG	0x0002
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index c681461..97c84a5 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -33,12 +33,7 @@
 	void (*regfunc)(void);
 	void (*unregfunc)(void);
 	struct tracepoint_func __rcu *funcs;
-} __attribute__((aligned(32)));		/*
-					 * Aligned on 32 bytes because it is
-					 * globally visible and gcc happily
-					 * align these on the structure size.
-					 * Keep in sync with vmlinux.lds.h.
-					 */
+};
 
 /*
  * Connect a probe to a tracepoint.
@@ -61,15 +56,15 @@
 
 struct tracepoint_iter {
 	struct module *module;
-	struct tracepoint *tracepoint;
+	struct tracepoint * const *tracepoint;
 };
 
 extern void tracepoint_iter_start(struct tracepoint_iter *iter);
 extern void tracepoint_iter_next(struct tracepoint_iter *iter);
 extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
 extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
-extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
-	struct tracepoint *begin, struct tracepoint *end);
+extern int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
+	struct tracepoint * const *begin, struct tracepoint * const *end);
 
 /*
  * tracepoint_synchronize_unregister must be called between the last tracepoint
@@ -84,11 +79,13 @@
 #define PARAMS(args...) args
 
 #ifdef CONFIG_TRACEPOINTS
-extern void tracepoint_update_probe_range(struct tracepoint *begin,
-	struct tracepoint *end);
+extern
+void tracepoint_update_probe_range(struct tracepoint * const *begin,
+	struct tracepoint * const *end);
 #else
-static inline void tracepoint_update_probe_range(struct tracepoint *begin,
-	struct tracepoint *end)
+static inline
+void tracepoint_update_probe_range(struct tracepoint * const *begin,
+	struct tracepoint * const *end)
 { }
 #endif /* CONFIG_TRACEPOINTS */
 
@@ -174,12 +171,20 @@
 	{								\
 	}
 
+/*
+ * We have no guarantee that gcc and the linker won't up-align the tracepoint
+ * structures, so we create an array of pointers that will be used for iteration
+ * on the tracepoints.
+ */
 #define DEFINE_TRACE_FN(name, reg, unreg)				\
 	static const char __tpstrtab_##name[]				\
 	__attribute__((section("__tracepoints_strings"))) = #name;	\
 	struct tracepoint __tracepoint_##name				\
-	__attribute__((section("__tracepoints"), aligned(32))) =	\
-		{ __tpstrtab_##name, 0, reg, unreg, NULL }
+	__attribute__((section("__tracepoints"))) =			\
+		{ __tpstrtab_##name, 0, reg, unreg, NULL };		\
+	static struct tracepoint * const __tracepoint_ptr_##name __used	\
+	__attribute__((section("__tracepoints_ptrs"))) =		\
+		&__tracepoint_##name;
 
 #define DEFINE_TRACE(name)						\
 	DEFINE_TRACE_FN(name, NULL, NULL);
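Instead of forcing a fixed alignment on struct tracepoint and iterating the structs themselves, the patch emits one pointer per tracepoint into __tracepoints_ptrs and iterates the pointer array, which stays dense no matter how the compiler pads or over-aligns the structs. A hedged userspace sketch of the same section-array technique, relying on the __start_/__stop_ symbols that GNU ld provides for identifier-named sections; the section, macro and symbol names below are illustrative, not the kernel's:

/* Hedged sketch (userspace, gcc/GNU ld): collect pointers to registered
 * items in a named section and walk them via linker-provided symbols. */
#include <stdio.h>

struct item { const char *name; };

#define REGISTER_ITEM(n)						\
	static struct item __item_##n = { #n };				\
	static struct item * const __ptr_##n				\
	__attribute__((used, section("item_ptrs"))) = &__item_##n

REGISTER_ITEM(foo);
REGISTER_ITEM(bar);

/* GNU ld emits these for sections whose names are valid C identifiers. */
extern struct item * const __start_item_ptrs[];
extern struct item * const __stop_item_ptrs[];

int main(void)
{
	struct item * const *p;

	/* Pointer size/alignment is uniform, so the array is dense even if
	 * the structs themselves get padded or over-aligned. */
	for (p = __start_item_ptrs; p < __stop_item_ptrs; p++)
		printf("%s\n", (*p)->name);
	return 0;
}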
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h
index 5e86dc7..81a9279 100644
--- a/include/linux/usb/cdc.h
+++ b/include/linux/usb/cdc.h
@@ -89,7 +89,7 @@
 
 #define USB_CDC_COMM_FEATURE	0x01
 #define USB_CDC_CAP_LINE	0x02
-#define USB_CDC_CAP_BRK	0x04
+#define USB_CDC_CAP_BRK		0x04
 #define USB_CDC_CAP_NOTIFY	0x08
 
 /* "Union Functional Descriptor" from CDC spec 5.2.3.8 */
@@ -271,6 +271,11 @@
 	__le16	wLength;
 } __attribute__ ((packed));
 
+struct usb_cdc_speed_change {
+	__le32	DLBitRRate;	/* contains the downlink bit rate (IN pipe) */
+	__le32	ULBitRate;	/* contains the uplink bit rate (OUT pipe) */
+} __attribute__ ((packed));
+
 /*-------------------------------------------------------------------------*/
 
 /*
@@ -292,7 +297,7 @@
 	__le16	wNdpOutDivisor;
 	__le16	wNdpOutPayloadRemainder;
 	__le16	wNdpOutAlignment;
-	__le16	wPadding2;
+	__le16	wNtbOutMaxDatagrams;
 } __attribute__ ((packed));
 
 /*
@@ -307,7 +312,7 @@
 	__le16	wHeaderLength;
 	__le16	wSequence;
 	__le16	wBlockLength;
-	__le16	wFpIndex;
+	__le16	wNdpIndex;
 } __attribute__ ((packed));
 
 struct usb_cdc_ncm_nth32 {
@@ -315,7 +320,7 @@
 	__le16	wHeaderLength;
 	__le16	wSequence;
 	__le32	dwBlockLength;
-	__le32	dwFpIndex;
+	__le32	dwNdpIndex;
 } __attribute__ ((packed));
 
 /*
@@ -337,7 +342,7 @@
 struct usb_cdc_ncm_ndp16 {
 	__le32	dwSignature;
 	__le16	wLength;
-	__le16	wNextFpIndex;
+	__le16	wNextNdpIndex;
 	struct	usb_cdc_ncm_dpe16 dpe16[0];
 } __attribute__ ((packed));
 
@@ -375,6 +380,7 @@
 #define USB_CDC_NCM_NCAP_ENCAP_COMMAND			(1 << 2)
 #define USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE		(1 << 3)
 #define USB_CDC_NCM_NCAP_CRC_MODE			(1 << 4)
+#define	USB_CDC_NCM_NCAP_NTB_INPUT_SIZE			(1 << 5)
 
 /* CDC NCM subclass Table 6-3: NTB Parameter Structure */
 #define USB_CDC_NCM_NTB16_SUPPORTED			(1 << 0)
@@ -392,6 +398,13 @@
 #define USB_CDC_NCM_NTB_MIN_IN_SIZE			2048
 #define USB_CDC_NCM_NTB_MIN_OUT_SIZE			2048
 
+/* NTB Input Size Structure */
+struct usb_cdc_ncm_ndp_input_size {
+	__le32	dwNtbInMaxSize;
+	__le16	wNtbInMaxDatagrams;
+	__le16	wReserved;
+} __attribute__ ((packed));
+
 /* CDC NCM subclass 6.2.11 SetCrcMode */
 #define USB_CDC_NCM_CRC_NOT_APPENDED			0x00
 #define USB_CDC_NCM_CRC_APPENDED			0x01
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index dd6ee49..a854fe8 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -112,6 +112,7 @@
 	/* Flags that get set only during HCD registration or removal. */
 	unsigned		rh_registered:1;/* is root hub registered? */
 	unsigned		rh_pollable:1;	/* may we poll the root hub? */
+	unsigned		msix_enabled:1;	/* driver has MSI-X enabled? */
 
 	/* The next flag is a stopgap, to be removed when all the HCDs
 	 * support the new root-hub polling mechanism. */
diff --git a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h
index b92e173..7d1babb 100644
--- a/include/linux/usb/msm_hsusb_hw.h
+++ b/include/linux/usb/msm_hsusb_hw.h
@@ -16,12 +16,8 @@
 #ifndef __LINUX_USB_GADGET_MSM72K_UDC_H__
 #define __LINUX_USB_GADGET_MSM72K_UDC_H__
 
-#ifdef CONFIG_ARCH_MSM7X00A
-#define USB_SBUSCFG          (MSM_USB_BASE + 0x0090)
-#else
 #define USB_AHBBURST         (MSM_USB_BASE + 0x0090)
 #define USB_AHBMODE          (MSM_USB_BASE + 0x0098)
-#endif
 #define USB_CAPLENGTH        (MSM_USB_BASE + 0x0100) /* 8 bit */
 
 #define USB_USBCMD           (MSM_USB_BASE + 0x0140)
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 16d682f..c904913 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -347,6 +347,9 @@
 extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
 					unsigned int ch);
 extern int usb_serial_handle_break(struct usb_serial_port *port);
+extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
+					 struct tty_struct *tty,
+					 unsigned int status);
 
 
 extern int usb_serial_bus_register(struct usb_serial_driver *device);
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 0093dd7..800617b 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -109,7 +109,10 @@
 				      unsigned int fbit)
 {
 	/* Did you forget to fix assumptions on max features? */
-	MAYBE_BUILD_BUG_ON(fbit >= 32);
+	if (__builtin_constant_p(fbit))
+		BUILD_BUG_ON(fbit >= 32);
+	else
+		BUG_ON(fbit >= 32);
 
 	if (fbit < VIRTIO_TRANSPORT_F_START)
 		virtio_check_driver_offered_feature(vdev, fbit);
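The replaced MAYBE_BUILD_BUG_ON becomes an explicit split: a constant feature bit out of range is rejected at compile time, a non-constant one falls back to a runtime check. A hedged sketch of the same pattern in plain GNU C, with a negative-array-size trick standing in for the kernel's BUILD_BUG_ON; check_fbit() and COMPILE_TIME_CHECK() are illustrative names:

/* Hedged sketch (GNU C): reject out-of-range bits at compile time when the
 * argument is a constant expression, at run time otherwise. */
#include <assert.h>

/* Compile error when cond is a constant true; a VLA sizeof otherwise. */
#define COMPILE_TIME_CHECK(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

#define check_fbit(fbit)					\
	do {							\
		if (__builtin_constant_p(fbit))			\
			COMPILE_TIME_CHECK((fbit) >= 32);	\
		else						\
			assert((fbit) < 32);			\
	} while (0)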
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
index a85064d..e4d3335 100644
--- a/include/linux/virtio_console.h
+++ b/include/linux/virtio_console.h
@@ -7,7 +7,8 @@
  * This header, excluding the #ifdef __KERNEL__ part, is BSD licensed so
  * anyone can use the definitions to implement compatible drivers/servers.
  *
- * Copyright (C) Red Hat, Inc., 2009, 2010
+ * Copyright (C) Red Hat, Inc., 2009, 2010, 2011
+ * Copyright (C) Amit Shah <amit.shah@redhat.com>, 2009, 2010, 2011
  */
 
 /* Feature bits */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 1ac1158..f7998a3 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -250,7 +250,7 @@
 enum {
 	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
 	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
-	WQ_FREEZEABLE		= 1 << 2, /* freeze during suspend */
+	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
 	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
@@ -318,7 +318,7 @@
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
  * @name: name of the workqueue
- * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
  *
  * Allocate an ordered workqueue.  An ordered workqueue executes at
  * most one work item at any given time in the queued order.  They are
@@ -335,8 +335,8 @@
 
 #define create_workqueue(name)					\
 	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
-#define create_freezeable_workqueue(name)			\
-	alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
+#define create_freezable_workqueue(name)			\
+	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 #define create_singlethread_workqueue(name)			\
 	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
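Everything spelled *FREEZEABLE is renamed to *FREEZABLE, including the convenience wrapper. A minimal sketch of allocating such a workqueue under the new names; "mydrv" and example_init() are illustrative:

/* Hedged sketch: allocate a freezable workqueue in a driver's init path. */
static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	example_wq = create_freezable_workqueue("mydrv");
	if (!example_wq)
		return -ENOMEM;
	/* Open-coded equivalent using the renamed flag:
	 * alloc_workqueue("mydrv", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	 */
	return 0;
}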
 
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 930fdd2..b93d6f5 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -350,6 +350,7 @@
 #define XFRM_STATE_WILDRECV	8
 #define XFRM_STATE_ICMP		16
 #define XFRM_STATE_AF_UNSPEC	32
+#define XFRM_STATE_ALIGN4	64
 };
 
 struct xfrm_usersa_id {
diff --git a/include/media/mt9v011.h b/include/media/mt9v011.h
new file mode 100644
index 0000000..ea29fc7
--- /dev/null
+++ b/include/media/mt9v011.h
@@ -0,0 +1,17 @@
+/* mt9v011 sensor
+ *
+ * Copyright (C) 2011 Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MT9V011_H__
+#define __MT9V011_H__
+
+struct mt9v011_platform_data {
+	unsigned xtal;	/* Hz */
+};
+
+#endif
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index a23c1fc..2963263 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -183,6 +183,9 @@
 }
 
 #define IR_MAX_DURATION         0xFFFFFFFF      /* a bit more than 4 seconds */
+#define US_TO_NS(usec)		((usec) * 1000)
+#define MS_TO_US(msec)		((msec) * 1000)
+#define MS_TO_NS(msec)		((msec) * 1000 * 1000)
 
 void ir_raw_event_handle(struct rc_dev *dev);
 int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev);
diff --git a/include/media/saa7146.h b/include/media/saa7146.h
index ac7ce00..7982714 100644
--- a/include/media/saa7146.h
+++ b/include/media/saa7146.h
@@ -115,7 +115,7 @@
 
 	/* different device locks */
 	spinlock_t			slock;
-	struct mutex			lock;
+	struct mutex			v4l2_lock;
 
 	unsigned char			__iomem *mem;		/* pointer to mapped IO memory */
 	u32				revision;	/* chip revision; needed for bug-workarounds*/
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 2d65b35..a659319 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -138,21 +138,10 @@
 
 /* Load an i2c module and return an initialized v4l2_subdev struct.
    The client_type argument is the name of the chip that's on the adapter. */
-struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
+struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
 		struct i2c_adapter *adapter, const char *client_type,
-		int irq, void *platform_data,
 		u8 addr, const unsigned short *probe_addrs);
 
-/* Load an i2c module and return an initialized v4l2_subdev struct.
-   The client_type argument is the name of the chip that's on the adapter. */
-static inline struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
-		struct i2c_adapter *adapter, const char *client_type,
-		u8 addr, const unsigned short *probe_addrs)
-{
-	return v4l2_i2c_new_subdev_cfg(v4l2_dev, adapter, client_type, 0, NULL,
-				       addr, probe_addrs);
-}
-
 struct i2c_board_info;
 
 struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index d69ab4a..97d0638 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -23,6 +23,7 @@
 
 #include <linux/list.h>
 #include <linux/device.h>
+#include <linux/videodev2.h>
 
 /* forward references */
 struct v4l2_ctrl_handler;
@@ -53,8 +54,10 @@
   * @handler:	The handler that owns the control.
   * @cluster:	Point to start of cluster array.
   * @ncontrols:	Number of controls in cluster array.
-  * @has_new:	Internal flag: set when there is a valid new value.
   * @done:	Internal flag: set for each processed control.
+  * @is_new:	Set when the user specified a new value for this control. It
+  *		is also set when called from v4l2_ctrl_handler_setup. Drivers
+  *		should never set this flag.
   * @is_private: If set, then this control is private to its handler and it
   *		will not be added to any other handlers. Drivers can set
   *		this flag.
@@ -97,9 +100,9 @@
 	struct v4l2_ctrl_handler *handler;
 	struct v4l2_ctrl **cluster;
 	unsigned ncontrols;
-	unsigned int has_new:1;
 	unsigned int done:1;
 
+	unsigned int is_new:1;
 	unsigned int is_private:1;
 	unsigned int is_volatile:1;
 
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index b0316a7..daf1e57 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -106,10 +106,7 @@
 	u8 strength;	/* Pin drive strength */
 };
 
-/* s_config: if set, then it is always called by the v4l2_i2c_new_subdev*
-	functions after the v4l2_subdev was registered. It is used to pass
-	platform data to the subdev which can be used during initialization.
-
+/*
    s_io_pin_config: configure one or more chip I/O pins for chips that
 	multiplex different internal signal pads out to IO pins.  This function
 	takes a pointer to an array of 'n' pin configuration entries, one for
@@ -141,7 +138,6 @@
 struct v4l2_subdev_core_ops {
 	int (*g_chip_ident)(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip);
 	int (*log_status)(struct v4l2_subdev *sd);
-	int (*s_config)(struct v4l2_subdev *sd, int irq, void *platform_data);
 	int (*s_io_pin_config)(struct v4l2_subdev *sd, size_t n,
 				      struct v4l2_subdev_io_pin_config *pincfg);
 	int (*init)(struct v4l2_subdev *sd, u32 val);
@@ -415,6 +411,21 @@
 	const struct v4l2_subdev_sensor_ops	*sensor;
 };
 
+/*
+ * Internal ops. Never call these from drivers; only the v4l2 framework may
+ * call these ops.
+ *
+ * registered: called when this subdev is registered. When called the v4l2_dev
+ *	field is set to the correct v4l2_device.
+ *
+ * unregistered: called when this subdev is unregistered. When called the
+ *	v4l2_dev field is still set to the correct v4l2_device.
+ */
+struct v4l2_subdev_internal_ops {
+	int (*registered)(struct v4l2_subdev *sd);
+	void (*unregistered)(struct v4l2_subdev *sd);
+};
+
 #define V4L2_SUBDEV_NAME_SIZE 32
 
 /* Set this flag if this subdev is a i2c device. */
@@ -431,6 +442,8 @@
 	u32 flags;
 	struct v4l2_device *v4l2_dev;
 	const struct v4l2_subdev_ops *ops;
+	/* Never call these internal ops from within a driver! */
+	const struct v4l2_subdev_internal_ops *internal_ops;
 	/* The control handler of this subdev. May be NULL. */
 	struct v4l2_ctrl_handler *ctrl_handler;
 	/* name must be unique */
diff --git a/include/net/dst.h b/include/net/dst.h
index 93b0310..23b564d 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -40,24 +40,10 @@
 	struct rcu_head		rcu_head;
 	struct dst_entry	*child;
 	struct net_device       *dev;
-	short			error;
-	short			obsolete;
-	int			flags;
-#define DST_HOST		0x0001
-#define DST_NOXFRM		0x0002
-#define DST_NOPOLICY		0x0004
-#define DST_NOHASH		0x0008
-#define DST_NOCACHE		0x0010
+	struct  dst_ops	        *ops;
+	unsigned long		_metrics;
 	unsigned long		expires;
-
-	unsigned short		header_len;	/* more space at head required */
-	unsigned short		trailer_len;	/* space to reserve at tail */
-
-	unsigned int		rate_tokens;
-	unsigned long		rate_last;	/* rate limiting for ICMP */
-
 	struct dst_entry	*path;
-
 	struct neighbour	*neighbour;
 	struct hh_cache		*hh;
 #ifdef CONFIG_XFRM
@@ -68,17 +54,16 @@
 	int			(*input)(struct sk_buff*);
 	int			(*output)(struct sk_buff*);
 
-	struct  dst_ops	        *ops;
-
-	u32			_metrics[RTAX_MAX];
-
-#ifdef CONFIG_NET_CLS_ROUTE
+	short			error;
+	short			obsolete;
+	unsigned short		header_len;	/* more space at head required */
+	unsigned short		trailer_len;	/* space to reserve at tail */
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	__u32			tclassid;
 #else
 	__u32			__pad2;
 #endif
 
-
 	/*
 	 * Align __refcnt to a 64 bytes alignment
 	 * (L1_CACHE_SIZE would be too much)
@@ -93,6 +78,12 @@
 	atomic_t		__refcnt;	/* client references	*/
 	int			__use;
 	unsigned long		lastuse;
+	int			flags;
+#define DST_HOST		0x0001
+#define DST_NOXFRM		0x0002
+#define DST_NOPOLICY		0x0004
+#define DST_NOHASH		0x0008
+#define DST_NOCACHE		0x0010
 	union {
 		struct dst_entry	*next;
 		struct rtable __rcu	*rt_next;
@@ -103,10 +94,70 @@
 
 #ifdef __KERNEL__
 
+extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
+extern const u32 dst_default_metrics[RTAX_MAX];
+
+#define DST_METRICS_READ_ONLY	0x1UL
+#define __DST_METRICS_PTR(Y)	\
+	((u32 *)((Y) & ~DST_METRICS_READ_ONLY))
+#define DST_METRICS_PTR(X)	__DST_METRICS_PTR((X)->_metrics)
+
+static inline bool dst_metrics_read_only(const struct dst_entry *dst)
+{
+	return dst->_metrics & DST_METRICS_READ_ONLY;
+}
+
+extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
+
+static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
+{
+	unsigned long val = dst->_metrics;
+	if (!(val & DST_METRICS_READ_ONLY))
+		__dst_destroy_metrics_generic(dst, val);
+}
+
+static inline u32 *dst_metrics_write_ptr(struct dst_entry *dst)
+{
+	unsigned long p = dst->_metrics;
+
+	if (p & DST_METRICS_READ_ONLY)
+		return dst->ops->cow_metrics(dst, p);
+	return __DST_METRICS_PTR(p);
+}
+
+/* This may only be invoked before the entry has reached global
+ * visibility.
+ */
+static inline void dst_init_metrics(struct dst_entry *dst,
+				    const u32 *src_metrics,
+				    bool read_only)
+{
+	dst->_metrics = ((unsigned long) src_metrics) |
+		(read_only ? DST_METRICS_READ_ONLY : 0);
+}
+
+static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
+{
+	u32 *dst_metrics = dst_metrics_write_ptr(dest);
+
+	if (dst_metrics) {
+		u32 *src_metrics = DST_METRICS_PTR(src);
+
+		memcpy(dst_metrics, src_metrics, RTAX_MAX * sizeof(u32));
+	}
+}
+
+static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
+{
+	return DST_METRICS_PTR(dst);
+}
+
 static inline u32
 dst_metric_raw(const struct dst_entry *dst, const int metric)
 {
-	return dst->_metrics[metric-1];
+	u32 *p = DST_METRICS_PTR(dst);
+
+	return p[metric-1];
 }
 
 static inline u32
@@ -131,22 +182,10 @@
 
 static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
 {
-	dst->_metrics[metric-1] = val;
-}
+	u32 *p = dst_metrics_write_ptr(dst);
 
-static inline void dst_import_metrics(struct dst_entry *dst, const u32 *src_metrics)
-{
-	memcpy(dst->_metrics, src_metrics, RTAX_MAX * sizeof(u32));
-}
-
-static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
-{
-	dst_import_metrics(dest, src->_metrics);
-}
-
-static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
-{
-	return dst->_metrics;
+	if (p)
+		p[metric-1] = val;
 }
 
 static inline u32
@@ -181,8 +220,6 @@
 dst_allfrag(const struct dst_entry *dst)
 {
 	int ret = dst_feature(dst,  RTAX_FEATURE_ALLFRAG);
-	/* Yes, _exactly_. This is paranoia. */
-	barrier();
 	return ret;
 }
 
@@ -315,7 +352,7 @@
 }
 
 extern int dst_discard(struct sk_buff *skb);
-extern void * dst_alloc(struct dst_ops * ops);
+extern void *dst_alloc(struct dst_ops * ops, int initial_ref);
 extern void __dst_free(struct dst_entry * dst);
 extern struct dst_entry *dst_destroy(struct dst_entry * dst);
 
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 21a320b..dc07463 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -18,6 +18,7 @@
 	struct dst_entry *	(*check)(struct dst_entry *, __u32 cookie);
 	unsigned int		(*default_advmss)(const struct dst_entry *);
 	unsigned int		(*default_mtu)(const struct dst_entry *);
+	u32 *			(*cow_metrics)(struct dst_entry *, unsigned long);
 	void			(*destroy)(struct dst_entry *);
 	void			(*ifdown)(struct dst_entry *,
 					  struct net_device *dev, int how);
diff --git a/include/net/flow.h b/include/net/flow.h
index 240b7f3..1ae901f 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -48,7 +48,8 @@
 
 	__u8	proto;
 	__u8	flags;
-#define FLOWI_FLAG_ANYSRC 0x01
+#define FLOWI_FLAG_ANYSRC		0x01
+#define FLOWI_FLAG_PRECOW_METRICS	0x02
 	union {
 		struct {
 			__be16	sport;
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 8a64b81..b4c7c1c 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -195,7 +195,8 @@
  */
 static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr)
 {
-	nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
+	if (hdr)
+		nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
 }
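Making genlmsg_cancel() tolerate a NULL header lets message-fill routines funnel every failure through a single label, whether genlmsg_put() itself failed or a later attribute put did. A hedged sketch; the family, command and attribute names are illustrative assumptions:

/* Hedged sketch: one error label serves both genlmsg_put() failure (hdr is
 * NULL, now safe to cancel) and attribute put failure. */
static int fill_example(struct sk_buff *skb, u32 pid, u32 seq, u32 value)
{
	void *hdr;

	hdr = genlmsg_put(skb, pid, seq, &example_genl_family, 0,
			  EXAMPLE_CMD_GET);
	if (!hdr)
		goto nla_put_failure;

	NLA_PUT_U32(skb, EXAMPLE_ATTR_VALUE, value);

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);	/* safe even when hdr is NULL */
	return -EMSGSIZE;
}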
 
 /**
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 6e991e0..f0698b9 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -45,7 +45,4 @@
 extern int	icmp_init(void);
 extern void	icmp_out_count(struct net *net, unsigned char type);
 
-/* Move into dst.h ? */
-extern int 	xrlim_allow(struct dst_entry *dst, int timeout);
-
 #endif	/* _ICMP_H */
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 8181498..6e6dfd7 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -219,7 +219,13 @@
 
 static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
 {
-	return inet_sk(sk)->transparent ? FLOWI_FLAG_ANYSRC : 0;
+	__u8 flags = 0;
+
+	if (inet_sk(sk)->transparent)
+		flags |= FLOWI_FLAG_ANYSRC;
+	if (sk->sk_protocol == IPPROTO_TCP)
+		flags |= FLOWI_FLAG_PRECOW_METRICS;
+	return flags;
 }
 
 #endif	/* _INET_SOCK_H */
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 599d96e..e6dd8da6 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -11,15 +11,20 @@
 #include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/spinlock.h>
+#include <linux/rtnetlink.h>
 #include <net/ipv6.h>
 #include <asm/atomic.h>
 
-struct inetpeer_addr {
+struct inetpeer_addr_base {
 	union {
-		__be32		a4;
-		__be32		a6[4];
+		__be32			a4;
+		__be32			a6[4];
 	};
-	__u16	family;
+};
+
+struct inetpeer_addr {
+	struct inetpeer_addr_base	addr;
+	__u16				family;
 };
 
 struct inet_peer {
@@ -33,15 +38,22 @@
 	atomic_t		refcnt;
 	/*
 	 * Once inet_peer is queued for deletion (refcnt == -1), following fields
-	 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
-	 * We can share memory with rcu_head to keep inet_peer small
+	 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp, metrics
+	 * We can share memory with rcu_head to help keep inet_peer small.
 	 */
 	union {
 		struct {
-			atomic_t	rid;		/* Frag reception counter */
-			atomic_t	ip_id_count;	/* IP ID for the next packet */
-			__u32		tcp_ts;
-			__u32		tcp_ts_stamp;
+			atomic_t			rid;		/* Frag reception counter */
+			atomic_t			ip_id_count;	/* IP ID for the next packet */
+			__u32				tcp_ts;
+			__u32				tcp_ts_stamp;
+			u32				metrics[RTAX_MAX];
+			u32				rate_tokens;	/* rate limiting for ICMP */
+			unsigned long			rate_last;
+			unsigned long			pmtu_expires;
+			u32				pmtu_orig;
+			u32				pmtu_learned;
+			struct inetpeer_addr_base	redirect_learned;
 		};
 		struct rcu_head         rcu;
 	};
@@ -49,6 +61,13 @@
 
 void			inet_initpeers(void) __init;
 
+#define INETPEER_METRICS_NEW	(~(u32) 0)
+
+static inline bool inet_metrics_new(const struct inet_peer *p)
+{
+	return p->metrics[RTAX_LOCK-1] == INETPEER_METRICS_NEW;
+}
+
 /* can be called with or without local BH being disabled */
 struct inet_peer	*inet_getpeer(struct inetpeer_addr *daddr, int create);
 
@@ -56,7 +75,7 @@
 {
 	struct inetpeer_addr daddr;
 
-	daddr.a4 = v4daddr;
+	daddr.addr.a4 = v4daddr;
 	daddr.family = AF_INET;
 	return inet_getpeer(&daddr, create);
 }
@@ -65,13 +84,14 @@
 {
 	struct inetpeer_addr daddr;
 
-	ipv6_addr_copy((struct in6_addr *)daddr.a6, v6daddr);
+	ipv6_addr_copy((struct in6_addr *)daddr.addr.a6, v6daddr);
 	daddr.family = AF_INET6;
 	return inet_getpeer(&daddr, create);
 }
 
 /* can be called from BH context or outside */
 extern void inet_putpeer(struct inet_peer *p);
+extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
 
 /*
  * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
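struct inet_peer now caches a metrics[] array (plus ICMP rate-limit and PMTU state) in the union that previously held only the ID and timestamp fields, and a freshly created peer is marked by the INETPEER_METRICS_NEW sentinel in the RTAX_LOCK slot, which inet_metrics_new() tests. A hedged sketch of checking that sentinel after a lookup; example_check_peer() is an illustrative helper and the code that would actually seed the metrics is elided:

/* Hedged sketch: look up (or create) the peer for a destination and test
 * whether its cached metrics were ever initialised. */
static void example_check_peer(__be32 daddr)
{
	struct inet_peer *peer = inet_getpeer_v4(daddr, 1);	/* 1 == create */

	if (!peer)
		return;
	if (inet_metrics_new(peer)) {
		/* metrics[] still holds INETPEER_METRICS_NEW in the RTAX_LOCK
		 * slot: seed it from the route/fib entry before relying on it. */
	}
	inet_putpeer(peer);
}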
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 708ff7c..46a6e8a 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -108,6 +108,7 @@
 	u32				rt6i_flags;
 	struct rt6key			rt6i_src;
 	u32				rt6i_metric;
+	u32				rt6i_peer_genid;
 
 	struct inet6_dev		*rt6i_idev;
 	struct inet_peer		*rt6i_peer;
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 07bdb5e..523a170 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -55,7 +55,7 @@
 	int			nh_weight;
 	int			nh_power;
 #endif
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	__u32			nh_tclassid;
 #endif
 	int			nh_oif;
@@ -77,7 +77,7 @@
 	int			fib_protocol;
 	__be32			fib_prefsrc;
 	u32			fib_priority;
-	u32			fib_metrics[RTAX_MAX];
+	u32			*fib_metrics;
 #define fib_mtu fib_metrics[RTAX_MTU-1]
 #define fib_window fib_metrics[RTAX_WINDOW-1]
 #define fib_rtt fib_metrics[RTAX_RTT-1]
@@ -96,12 +96,15 @@
 struct fib_rule;
 #endif
 
+struct fib_table;
 struct fib_result {
 	unsigned char	prefixlen;
 	unsigned char	nh_sel;
 	unsigned char	type;
 	unsigned char	scope;
 	struct fib_info *fi;
+	struct fib_table *table;
+	struct list_head *fa_head;
 #ifdef CONFIG_IP_MULTIPLE_TABLES
 	struct fib_rule	*r;
 #endif
@@ -155,9 +158,6 @@
 extern int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
 			  struct netlink_callback *cb);
 extern int fib_table_flush(struct fib_table *table);
-extern void fib_table_select_default(struct fib_table *table,
-				     const struct flowi *flp,
-				     struct fib_result *res);
 extern void fib_free_table(struct fib_table *tb);
 
 
@@ -201,8 +201,8 @@
 extern int __net_init fib4_rules_init(struct net *net);
 extern void __net_exit fib4_rules_exit(struct net *net);
 
-#ifdef CONFIG_NET_CLS_ROUTE
-extern u32 fib_rules_tclass(struct fib_result *res);
+#ifdef CONFIG_IP_ROUTE_CLASSID
+extern u32 fib_rules_tclass(const struct fib_result *res);
 #endif
 
 extern int fib_lookup(struct net *n, struct flowi *flp, struct fib_result *res);
@@ -218,8 +218,7 @@
 extern int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
 			       struct net_device *dev, __be32 *spec_dst,
 			       u32 *itag, u32 mark);
-extern void fib_select_default(struct net *net, const struct flowi *flp,
-			       struct fib_result *res);
+extern void fib_select_default(struct fib_result *res);
 
 /* Exported by fib_semantics.c */
 extern int ip_fib_check_default(__be32 gw, struct net_device *dev);
@@ -229,13 +228,13 @@
 extern __be32  __fib_res_prefsrc(struct fib_result *res);
 extern void fib_select_multipath(const struct flowi *flp, struct fib_result *res);
 
-/* Exported by fib_{hash|trie}.c */
-extern void fib_hash_init(void);
-extern struct fib_table *fib_hash_table(u32 id);
+/* Exported by fib_trie.c */
+extern void fib_trie_init(void);
+extern struct fib_table *fib_trie_table(u32 id);
 
-static inline void fib_combine_itag(u32 *itag, struct fib_result *res)
+static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
 {
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 #ifdef CONFIG_IP_MULTIPLE_TABLES
 	u32 rtag;
 #endif
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index b7bbd6c..5d75fea 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -28,6 +28,80 @@
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netfilter/nf_conntrack.h>
 #endif
+#include <net/net_namespace.h>		/* Network namespace */
+
+/*
+ * Generic access of ipvs struct
+ */
+static inline struct netns_ipvs *net_ipvs(struct net* net)
+{
+	return net->ipvs;
+}
+/*
+ * Get the net ptr from an skb in traffic cases;
+ * use skb_sknet when the call is from userland (ioctl or netlink).
+ */
+static inline struct net *skb_net(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+#ifdef CONFIG_IP_VS_DEBUG
+	/*
+	 * This is used for debug only.
+	 * Start with the most likely hit,
+	 * end with BUG().
+	 */
+	if (likely(skb->dev && skb->dev->nd_net))
+		return dev_net(skb->dev);
+	if (skb_dst(skb)->dev)
+		return dev_net(skb_dst(skb)->dev);
+	WARN(skb->sk, "Maybe skb_sknet should be used in %s() at line:%d\n",
+		      __func__, __LINE__);
+	if (likely(skb->sk && skb->sk->sk_net))
+		return sock_net(skb->sk);
+	pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
+		__func__, __LINE__);
+	BUG();
+#else
+	return dev_net(skb->dev ? : skb_dst(skb)->dev);
+#endif
+#else
+	return &init_net;
+#endif
+}
+
+static inline struct net *skb_sknet(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_NS
+#ifdef CONFIG_IP_VS_DEBUG
+	/* Start with the most likely hit */
+	if (likely(skb->sk && skb->sk->sk_net))
+		return sock_net(skb->sk);
+	WARN(skb->dev, "Maybe skb_net should be used instead in %s() line:%d\n",
+		       __func__, __LINE__);
+	if (likely(skb->dev && skb->dev->nd_net))
+		return dev_net(skb->dev);
+	pr_err("There is no net ptr to find in the skb in %s() line:%d\n",
+		__func__, __LINE__);
+	BUG();
+#else
+	return sock_net(skb->sk);
+#endif
+#else
+	return &init_net;
+#endif
+}
+/*
+ * This one is needed for single_open_net since net is stored directly in
+ * private, not as a struct, i.e. seq_file_net can't be used.
+ */
+static inline struct net *seq_file_single_net(struct seq_file *seq)
+{
+#ifdef CONFIG_NET_NS
+	return (struct net *)seq->private;
+#else
+	return &init_net;
+#endif
+}
 
 /* Connections' size value needed by ip_vs_ctl.c */
 extern int ip_vs_conn_tab_size;
@@ -258,6 +332,23 @@
 						   before last resized pkt */
 };
 
+/*
+ * counters per cpu
+ */
+struct ip_vs_counters {
+	__u32		conns;		/* connections scheduled */
+	__u32		inpkts;		/* incoming packets */
+	__u32		outpkts;	/* outgoing packets */
+	__u64		inbytes;	/* incoming bytes */
+	__u64		outbytes;	/* outgoing bytes */
+};
+/*
+ * Stats per cpu
+ */
+struct ip_vs_cpu_stats {
+	struct ip_vs_counters   ustats;
+	struct u64_stats_sync   syncp;
+};
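Each CPU gets its own ip_vs_counters paired with a u64_stats_sync, so the 64-bit byte counters can be bumped without atomics and still be read consistently on 32-bit machines. A hedged sketch of a reader folding the per-cpu values into one total, assuming cpustats was allocated with alloc_percpu(struct ip_vs_cpu_stats); sum_cpu_stats() is an illustrative name and error handling is elided:

/* Hedged sketch: fold per-cpu stats into a single total using the
 * u64_stats_sync fetch/retry loop. */
static void sum_cpu_stats(struct ip_vs_cpu_stats __percpu *cpustats,
			  struct ip_vs_counters *sum)
{
	int cpu;

	*sum = (struct ip_vs_counters){ 0 };
	for_each_possible_cpu(cpu) {
		struct ip_vs_cpu_stats *s = per_cpu_ptr(cpustats, cpu);
		struct ip_vs_counters tmp;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			tmp = s->ustats;
		} while (u64_stats_fetch_retry(&s->syncp, start));

		sum->conns    += tmp.conns;
		sum->inpkts   += tmp.inpkts;
		sum->outpkts  += tmp.outpkts;
		sum->inbytes  += tmp.inbytes;
		sum->outbytes += tmp.outbytes;
	}
}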
 
 /*
  *	IPVS statistics objects
@@ -279,17 +370,34 @@
 };
 
 struct ip_vs_stats {
-	struct ip_vs_stats_user	ustats;         /* statistics */
+	struct ip_vs_stats_user	ustats;		/* statistics */
 	struct ip_vs_estimator	est;		/* estimator */
-
-	spinlock_t              lock;           /* spin lock */
+	struct ip_vs_cpu_stats	*cpustats;	/* per cpu counters */
+	spinlock_t		lock;		/* spin lock */
 };
 
+/*
+ * Helper macros for the per-cpu counters, e.g.
+ * ipvs->tot_stats->ustats.count
+ */
+#define IPVS_STAT_INC(ipvs, count)	\
+	__this_cpu_inc((ipvs)->ustats->count)
+
+#define IPVS_STAT_ADD(ipvs, count, value) \
+	do {\
+		write_seqcount_begin(per_cpu_ptr((ipvs)->ustats_seq, \
+				     raw_smp_processor_id())); \
+		__this_cpu_add((ipvs)->ustats->count, value); \
+		write_seqcount_end(per_cpu_ptr((ipvs)->ustats_seq, \
+				   raw_smp_processor_id())); \
+	} while (0)
+
 struct dst_entry;
 struct iphdr;
 struct ip_vs_conn;
 struct ip_vs_app;
 struct sk_buff;
+struct ip_vs_proto_data;
 
 struct ip_vs_protocol {
 	struct ip_vs_protocol	*next;
@@ -297,21 +405,22 @@
 	u16			protocol;
 	u16			num_states;
 	int			dont_defrag;
-	atomic_t		appcnt;		/* counter of proto app incs */
-	int			*timeout_table;	/* protocol timeout table */
 
 	void (*init)(struct ip_vs_protocol *pp);
 
 	void (*exit)(struct ip_vs_protocol *pp);
 
+	void (*init_netns)(struct net *net, struct ip_vs_proto_data *pd);
+
+	void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd);
+
 	int (*conn_schedule)(int af, struct sk_buff *skb,
-			     struct ip_vs_protocol *pp,
+			     struct ip_vs_proto_data *pd,
 			     int *verdict, struct ip_vs_conn **cpp);
 
 	struct ip_vs_conn *
 	(*conn_in_get)(int af,
 		       const struct sk_buff *skb,
-		       struct ip_vs_protocol *pp,
 		       const struct ip_vs_iphdr *iph,
 		       unsigned int proto_off,
 		       int inverse);
@@ -319,7 +428,6 @@
 	struct ip_vs_conn *
 	(*conn_out_get)(int af,
 			const struct sk_buff *skb,
-			struct ip_vs_protocol *pp,
 			const struct ip_vs_iphdr *iph,
 			unsigned int proto_off,
 			int inverse);
@@ -337,11 +445,11 @@
 
 	int (*state_transition)(struct ip_vs_conn *cp, int direction,
 				const struct sk_buff *skb,
-				struct ip_vs_protocol *pp);
+				struct ip_vs_proto_data *pd);
 
-	int (*register_app)(struct ip_vs_app *inc);
+	int (*register_app)(struct net *net, struct ip_vs_app *inc);
 
-	void (*unregister_app)(struct ip_vs_app *inc);
+	void (*unregister_app)(struct net *net, struct ip_vs_app *inc);
 
 	int (*app_conn_bind)(struct ip_vs_conn *cp);
 
@@ -350,14 +458,26 @@
 			     int offset,
 			     const char *msg);
 
-	void (*timeout_change)(struct ip_vs_protocol *pp, int flags);
-
-	int (*set_state_timeout)(struct ip_vs_protocol *pp, char *sname, int to);
+	void (*timeout_change)(struct ip_vs_proto_data *pd, int flags);
 };
 
-extern struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto);
+/*
+ * protocol data per netns
+ */
+struct ip_vs_proto_data {
+	struct ip_vs_proto_data	*next;
+	struct ip_vs_protocol	*pp;
+	int			*timeout_table;	/* protocol timeout table */
+	atomic_t		appcnt;		/* counter of proto app incs. */
+	struct tcp_states_t	*tcp_state_table;
+};
+
+extern struct ip_vs_protocol   *ip_vs_proto_get(unsigned short proto);
+extern struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
+						     unsigned short proto);
 
 struct ip_vs_conn_param {
+	struct net			*net;
 	const union nf_inet_addr	*caddr;
 	const union nf_inet_addr	*vaddr;
 	__be16				cport;
@@ -375,16 +495,19 @@
  */
 struct ip_vs_conn {
 	struct list_head        c_list;         /* hashed list heads */
-
+#ifdef CONFIG_NET_NS
+	struct net              *net;           /* Name space */
+#endif
 	/* Protocol, addresses and port numbers */
-	u16                      af;		/* address family */
-	union nf_inet_addr       caddr;          /* client address */
-	union nf_inet_addr       vaddr;          /* virtual address */
-	union nf_inet_addr       daddr;          /* destination address */
-	volatile __u32           flags;          /* status flags */
-	__be16                   cport;
-	__be16                   vport;
-	__be16                   dport;
+	u16                     af;             /* address family */
+	__be16                  cport;
+	__be16                  vport;
+	__be16                  dport;
+	__u32                   fwmark;         /* Firewall mark from skb */
+	union nf_inet_addr      caddr;          /* client address */
+	union nf_inet_addr      vaddr;          /* virtual address */
+	union nf_inet_addr      daddr;          /* destination address */
+	volatile __u32          flags;          /* status flags */
 	__u16                   protocol;       /* Which protocol (TCP/UDP) */
 
 	/* counter and timer */
@@ -422,10 +545,38 @@
 	struct ip_vs_seq        in_seq;         /* incoming seq. struct */
 	struct ip_vs_seq        out_seq;        /* outgoing seq. struct */
 
+	const struct ip_vs_pe	*pe;
 	char			*pe_data;
 	__u8			pe_data_len;
 };
 
+/*
+ *  To save some memory in the conn table when network namespaces are disabled.
+ */
+static inline struct net *ip_vs_conn_net(const struct ip_vs_conn *cp)
+{
+#ifdef CONFIG_NET_NS
+	return cp->net;
+#else
+	return &init_net;
+#endif
+}
+static inline void ip_vs_conn_net_set(struct ip_vs_conn *cp, struct net *net)
+{
+#ifdef CONFIG_NET_NS
+	cp->net = net;
+#endif
+}
+
+static inline int ip_vs_conn_net_eq(const struct ip_vs_conn *cp,
+				    struct net *net)
+{
+#ifdef CONFIG_NET_NS
+	return cp->net == net;
+#else
+	return 1;
+#endif
+}
 
 /*
  *	Extended internal versions of struct ip_vs_service_user and
@@ -485,6 +636,7 @@
 	unsigned		flags;	  /* service status flags */
 	unsigned		timeout;  /* persistent timeout in ticks */
 	__be32			netmask;  /* grouping granularity */
+	struct net		*net;
 
 	struct list_head	destinations;  /* real server d-linked list */
 	__u32			num_dests;     /* number of servers */
@@ -510,8 +662,8 @@
 	struct list_head	d_list;   /* for table with all the dests */
 
 	u16			af;		/* address family */
-	union nf_inet_addr	addr;		/* IP address of the server */
 	__be16			port;		/* port number of the server */
+	union nf_inet_addr	addr;		/* IP address of the server */
 	volatile unsigned	flags;		/* dest status flags */
 	atomic_t		conn_flags;	/* flags to copy to conn */
 	atomic_t		weight;		/* server weight */
@@ -538,8 +690,8 @@
 	/* for virtual service */
 	struct ip_vs_service	*svc;		/* service it belongs to */
 	__u16			protocol;	/* which protocol (TCP/UDP) */
-	union nf_inet_addr	vaddr;		/* virtual IP address */
 	__be16			vport;		/* virtual port number */
+	union nf_inet_addr	vaddr;		/* virtual IP address */
 	__u32			vfwmark;	/* firewall mark of service */
 };
 
@@ -674,13 +826,14 @@
 	IP_VS_DIR_LAST,
 };
 
-static inline void ip_vs_conn_fill_param(int af, int protocol,
+static inline void ip_vs_conn_fill_param(struct net *net, int af, int protocol,
 					 const union nf_inet_addr *caddr,
 					 __be16 cport,
 					 const union nf_inet_addr *vaddr,
 					 __be16 vport,
 					 struct ip_vs_conn_param *p)
 {
+	p->net = net;
 	p->af = af;
 	p->protocol = protocol;
 	p->caddr = caddr;
@@ -695,7 +848,6 @@
 struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p);
 
 struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
-					    struct ip_vs_protocol *pp,
 					    const struct ip_vs_iphdr *iph,
 					    unsigned int proto_off,
 					    int inverse);
@@ -703,7 +855,6 @@
 struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p);
 
 struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
-					     struct ip_vs_protocol *pp,
 					     const struct ip_vs_iphdr *iph,
 					     unsigned int proto_off,
 					     int inverse);
@@ -719,14 +870,14 @@
 struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
 				  const union nf_inet_addr *daddr,
 				  __be16 dport, unsigned flags,
-				  struct ip_vs_dest *dest);
+				  struct ip_vs_dest *dest, __u32 fwmark);
 extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
 
 extern const char * ip_vs_state_name(__u16 proto, int state);
 
-extern void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
+extern void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
 extern int ip_vs_check_template(struct ip_vs_conn *ct);
-extern void ip_vs_random_dropentry(void);
+extern void ip_vs_random_dropentry(struct net *net);
 extern int ip_vs_conn_init(void);
 extern void ip_vs_conn_cleanup(void);
 
@@ -796,12 +947,12 @@
  *      (from ip_vs_app.c)
  */
 #define IP_VS_APP_MAX_PORTS  8
-extern int register_ip_vs_app(struct ip_vs_app *app);
-extern void unregister_ip_vs_app(struct ip_vs_app *app);
+extern int register_ip_vs_app(struct net *net, struct ip_vs_app *app);
+extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
 extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
 extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
-extern int
-register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port);
+extern int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app,
+				  __u16 proto, __u16 port);
 extern int ip_vs_app_inc_get(struct ip_vs_app *inc);
 extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
 
@@ -814,15 +965,27 @@
 void ip_vs_unbind_pe(struct ip_vs_service *svc);
 int register_ip_vs_pe(struct ip_vs_pe *pe);
 int unregister_ip_vs_pe(struct ip_vs_pe *pe);
-extern struct ip_vs_pe *ip_vs_pe_get(const char *name);
-extern void ip_vs_pe_put(struct ip_vs_pe *pe);
+struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
+struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
+
+static inline void ip_vs_pe_get(const struct ip_vs_pe *pe)
+{
+	if (pe && pe->module)
+		__module_get(pe->module);
+}
+
+static inline void ip_vs_pe_put(const struct ip_vs_pe *pe)
+{
+	if (pe && pe->module)
+		module_put(pe->module);
+}
 
 /*
  *	IPVS protocol functions (from ip_vs_proto.c)
  */
 extern int ip_vs_protocol_init(void);
 extern void ip_vs_protocol_cleanup(void);
-extern void ip_vs_protocol_timeout_change(int flags);
+extern void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
 extern int *ip_vs_create_timeout_table(int *table, int size);
 extern int
 ip_vs_set_state_timeout(int *table, int num, const char *const *names,
@@ -852,26 +1015,21 @@
 extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
 extern struct ip_vs_conn *
 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
-	       struct ip_vs_protocol *pp, int *ignored);
+	       struct ip_vs_proto_data *pd, int *ignored);
 extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
-			struct ip_vs_protocol *pp);
+			struct ip_vs_proto_data *pd);
 
 
 /*
  *      IPVS control data and functions (from ip_vs_ctl.c)
  */
-extern int sysctl_ip_vs_cache_bypass;
-extern int sysctl_ip_vs_expire_nodest_conn;
-extern int sysctl_ip_vs_expire_quiescent_template;
-extern int sysctl_ip_vs_sync_threshold[2];
-extern int sysctl_ip_vs_nat_icmp_send;
-extern int sysctl_ip_vs_conntrack;
-extern int sysctl_ip_vs_snat_reroute;
 extern struct ip_vs_stats ip_vs_stats;
 extern const struct ctl_path net_vs_ctl_path[];
+extern int sysctl_ip_vs_sync_ver;
 
+extern void ip_vs_sync_switch_mode(struct net *net, int mode);
 extern struct ip_vs_service *
-ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
+ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
 		  const union nf_inet_addr *vaddr, __be16 vport);
 
 static inline void ip_vs_service_put(struct ip_vs_service *svc)
@@ -880,7 +1038,7 @@
 }
 
 extern struct ip_vs_dest *
-ip_vs_lookup_real_service(int af, __u16 protocol,
+ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
 			  const union nf_inet_addr *daddr, __be16 dport);
 
 extern int ip_vs_use_count_inc(void);
@@ -888,8 +1046,9 @@
 extern int ip_vs_control_init(void);
 extern void ip_vs_control_cleanup(void);
 extern struct ip_vs_dest *
-ip_vs_find_dest(int af, const union nf_inet_addr *daddr, __be16 dport,
-		const union nf_inet_addr *vaddr, __be16 vport, __u16 protocol);
+ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
+		__be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
+		__u16 protocol, __u32 fwmark);
 extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
 
 
@@ -897,14 +1056,12 @@
  *      IPVS sync daemon data and function prototypes
  *      (from ip_vs_sync.c)
  */
-extern volatile int ip_vs_sync_state;
-extern volatile int ip_vs_master_syncid;
-extern volatile int ip_vs_backup_syncid;
-extern char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-extern char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-extern int start_sync_thread(int state, char *mcast_ifn, __u8 syncid);
-extern int stop_sync_thread(int state);
-extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
+extern int start_sync_thread(struct net *net, int state, char *mcast_ifn,
+			     __u8 syncid);
+extern int stop_sync_thread(struct net *net, int state);
+extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp);
+extern int ip_vs_sync_init(void);
+extern void ip_vs_sync_cleanup(void);
 
 
 /*
@@ -912,8 +1069,8 @@
  */
 extern int ip_vs_estimator_init(void);
 extern void ip_vs_estimator_cleanup(void);
-extern void ip_vs_new_estimator(struct ip_vs_stats *stats);
-extern void ip_vs_kill_estimator(struct ip_vs_stats *stats);
+extern void ip_vs_new_estimator(struct net *net, struct ip_vs_stats *stats);
+extern void ip_vs_kill_estimator(struct net *net, struct ip_vs_stats *stats);
 extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
 
 /*
@@ -952,14 +1109,14 @@
  *	we are loaded. Just set ip_vs_drop_rate to 'n' and
  *	we start to drop 1/rate of the packets
  */
-extern int ip_vs_drop_rate;
-extern int ip_vs_drop_counter;
 
-static __inline__ int ip_vs_todrop(void)
+static inline int ip_vs_todrop(struct netns_ipvs *ipvs)
 {
-	if (!ip_vs_drop_rate) return 0;
-	if (--ip_vs_drop_counter > 0) return 0;
-	ip_vs_drop_counter = ip_vs_drop_rate;
+	if (!ipvs->drop_rate)
+		return 0;
+	if (--ipvs->drop_counter > 0)
+		return 0;
+	ipvs->drop_counter = ipvs->drop_rate;
 	return 1;
 }
 
@@ -1047,9 +1204,9 @@
  *      Netfilter connection tracking
  *      (from ip_vs_nfct.c)
  */
-static inline int ip_vs_conntrack_enabled(void)
+static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
 {
-	return sysctl_ip_vs_conntrack;
+	return ipvs->sysctl_conntrack;
 }
 
 extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
@@ -1062,7 +1219,7 @@
 
 #else
 
-static inline int ip_vs_conntrack_enabled(void)
+static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
 {
 	return 0;
 }
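
The signature changes above move per-protocol state behind struct ip_vs_proto_data, looked up per network namespace. A minimal sketch of how a handler might resolve its per-netns timeout table after this change; the wrapper function and the cp->state access are illustrative only, not taken from the patch:

static int example_conn_timeout(struct net *net, struct ip_vs_conn *cp)
{
	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, cp->protocol);

	/* the timeout table now lives in the per-netns proto data rather
	 * than in the global struct ip_vs_protocol */
	return pd ? pd->timeout_table[cp->state] : 0;
}
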
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 1bf812b..b3b4a34 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -20,6 +20,7 @@
 #include <net/netns/conntrack.h>
 #endif
 #include <net/netns/xfrm.h>
+#include <net/netns/ip_vs.h>
 
 struct proc_dir_entry;
 struct net_device;
@@ -94,6 +95,7 @@
 #ifdef CONFIG_XFRM
 	struct netns_xfrm	xfrm;
 #endif
+	struct netns_ipvs	*ipvs;
 };
 
 
diff --git a/include/net/netevent.h b/include/net/netevent.h
index e82b7ba..22b239c 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -21,7 +21,6 @@
 
 enum netevent_notif_type {
 	NETEVENT_NEIGH_UPDATE = 1, /* arg is struct neighbour ptr */
-	NETEVENT_PMTU_UPDATE,	   /* arg is struct dst_entry ptr */
 	NETEVENT_REDIRECT,	   /* arg is struct netevent_redirect ptr */
 };
 
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index d85cff1..d0d1337 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -50,11 +50,24 @@
 /* per conntrack: application helper private data */
 union nf_conntrack_help {
 	/* insert conntrack helper private data (master) here */
+#if defined(CONFIG_NF_CONNTRACK_FTP) || defined(CONFIG_NF_CONNTRACK_FTP_MODULE)
 	struct nf_ct_ftp_master ct_ftp_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_PPTP) || \
+    defined(CONFIG_NF_CONNTRACK_PPTP_MODULE)
 	struct nf_ct_pptp_master ct_pptp_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_H323) || \
+    defined(CONFIG_NF_CONNTRACK_H323_MODULE)
 	struct nf_ct_h323_master ct_h323_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_SANE) || \
+    defined(CONFIG_NF_CONNTRACK_SANE_MODULE)
 	struct nf_ct_sane_master ct_sane_info;
+#endif
+#if defined(CONFIG_NF_CONNTRACK_SIP) || defined(CONFIG_NF_CONNTRACK_SIP_MODULE)
 	struct nf_ct_sip_master ct_sip_info;
+#endif
 };
 
 #include <linux/types.h>
@@ -116,14 +129,14 @@
 	u_int32_t secmark;
 #endif
 
-	/* Storage reserved for other modules: */
-	union nf_conntrack_proto proto;
-
 	/* Extensions */
 	struct nf_ct_ext *ext;
 #ifdef CONFIG_NET_NS
 	struct net *ct_net;
 #endif
+
+	/* Storage reserved for other modules, must be the last member */
+	union nf_conntrack_proto proto;
 };
 
 static inline struct nf_conn *
@@ -189,9 +202,9 @@
  * Allocate a hashtable of hlist_head (if nulls == 0),
  * or hlist_nulls_head (if nulls == 1)
  */
-extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls);
+extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
 
-extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size);
+extern void nf_ct_free_hashtable(void *hash, unsigned int size);
 
 extern struct nf_conntrack_tuple_hash *
 __nf_conntrack_find(struct net *net, u16 zone,
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 96ba5f7..4283508 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -23,12 +23,17 @@
 static inline struct nf_conntrack_ecache *
 nf_ct_ecache_find(const struct nf_conn *ct)
 {
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
 	return nf_ct_ext_find(ct, NF_CT_EXT_ECACHE);
+#else
+	return NULL;
+#endif
 }
 
 static inline struct nf_conntrack_ecache *
 nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
 {
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
 	struct net *net = nf_ct_net(ct);
 	struct nf_conntrack_ecache *e;
 
@@ -45,6 +50,9 @@
 		e->expmask = expmask;
 	}
 	return e;
+#else
+	return NULL;
+#endif
 };
 
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
@@ -59,7 +67,7 @@
 	int (*fcn)(unsigned int events, struct nf_ct_event *item);
 };
 
-extern struct nf_ct_event_notifier *nf_conntrack_event_cb;
+extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
 extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb);
 extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
 
@@ -77,9 +85,6 @@
 	if (e == NULL)
 		return;
 
-	if (!(e->ctmask & (1 << event)))
-		return;
-
 	set_bit(event, &e->cache);
 }
 
@@ -159,7 +164,7 @@
 	int (*fcn)(unsigned int events, struct nf_exp_event *item);
 };
 
-extern struct nf_exp_event_notifier *nf_expect_event_cb;
+extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
 extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb);
 extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
 
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 0772d29..2dcf317 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -7,10 +7,19 @@
 
 enum nf_ct_ext_id {
 	NF_CT_EXT_HELPER,
+#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
 	NF_CT_EXT_NAT,
+#endif
 	NF_CT_EXT_ACCT,
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
 	NF_CT_EXT_ECACHE,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
 	NF_CT_EXT_ZONE,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+	NF_CT_EXT_TSTAMP,
+#endif
 	NF_CT_EXT_NUM,
 };
 
@@ -19,6 +28,7 @@
 #define NF_CT_EXT_ACCT_TYPE struct nf_conn_counter
 #define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
 #define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone
+#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
 
 /* Extensions: optional stuff which isn't permanently in struct. */
 struct nf_ct_ext {
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 32c305d..f1c1311 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -63,4 +63,10 @@
 extern int nf_conntrack_helper_init(void);
 extern void nf_conntrack_helper_fini(void);
 
+extern int nf_conntrack_broadcast_help(struct sk_buff *skb,
+				       unsigned int protoff,
+				       struct nf_conn *ct,
+				       enum ip_conntrack_info ctinfo,
+				       unsigned int timeout);
+
 #endif /*_NF_CONNTRACK_HELPER_H*/
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index a754761..e8010f4 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -73,7 +73,7 @@
 	struct module *me;
 };
 
-extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX];
+extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX];
 
 /* Protocol registration. */
 extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h
new file mode 100644
index 0000000..fc9c82b
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_timestamp.h
@@ -0,0 +1,65 @@
+#ifndef _NF_CONNTRACK_TSTAMP_H
+#define _NF_CONNTRACK_TSTAMP_H
+
+#include <net/net_namespace.h>
+#include <linux/netfilter/nf_conntrack_common.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+
+struct nf_conn_tstamp {
+	u_int64_t start;
+	u_int64_t stop;
+};
+
+static inline
+struct nf_conn_tstamp *nf_conn_tstamp_find(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+	return nf_ct_ext_find(ct, NF_CT_EXT_TSTAMP);
+#else
+	return NULL;
+#endif
+}
+
+static inline
+struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+	struct net *net = nf_ct_net(ct);
+
+	if (!net->ct.sysctl_tstamp)
+		return NULL;
+
+	return nf_ct_ext_add(ct, NF_CT_EXT_TSTAMP, gfp);
+#else
+	return NULL;
+#endif
+};
+
+static inline bool nf_ct_tstamp_enabled(struct net *net)
+{
+	return net->ct.sysctl_tstamp != 0;
+}
+
+static inline void nf_ct_set_tstamp(struct net *net, bool enable)
+{
+	net->ct.sysctl_tstamp = enable;
+}
+
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+extern int nf_conntrack_tstamp_init(struct net *net);
+extern void nf_conntrack_tstamp_fini(struct net *net);
+#else
+static inline int nf_conntrack_tstamp_init(struct net *net)
+{
+	return 0;
+}
+
+static inline void nf_conntrack_tstamp_fini(struct net *net)
+{
+	return;
+}
+#endif /* CONFIG_NF_CONNTRACK_TIMESTAMP */
+
+#endif /* _NF_CONNTRACK_TSTAMP_H */
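
The new header is all inline helpers; a rough sketch of the intended call pattern when a connection is created, assuming the tstamp sysctl is enabled (the surrounding function is illustrative only):

static void example_tstamp_on_new_conn(struct nf_conn *ct)
{
	struct nf_conn_tstamp *tstamp;

	/* allocates the extension only when net->ct.sysctl_tstamp is set */
	tstamp = nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
	if (tstamp)
		tstamp->start = ktime_to_ns(ktime_get_real());
}
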
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index f5f09f0..aff80b1 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -56,7 +56,9 @@
 /* per conntrack: nat application helper private data */
 union nf_conntrack_nat_help {
 	/* insert nat helper private data here */
+#if defined(CONFIG_NF_NAT_PPTP) || defined(CONFIG_NF_NAT_PPTP_MODULE)
 	struct nf_nat_pptp nat_pptp_info;
+#endif
 };
 
 struct nf_conn;
@@ -84,7 +86,11 @@
 
 static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
 {
+#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
 	return nf_ct_ext_find(ct, NF_CT_EXT_NAT);
+#else
+	return NULL;
+#endif
 }
 
 #else  /* !__KERNEL__: iptables wants this to compile. */
diff --git a/include/net/netfilter/nf_nat_core.h b/include/net/netfilter/nf_nat_core.h
index 33602ab..3dc7b98 100644
--- a/include/net/netfilter/nf_nat_core.h
+++ b/include/net/netfilter/nf_nat_core.h
@@ -21,9 +21,9 @@
 				     enum nf_nat_manip_type manip)
 {
 	if (manip == IP_NAT_MANIP_SRC)
-		return test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+		return ct->status & IPS_SRC_NAT_DONE;
 	else
-		return test_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
+		return ct->status & IPS_DST_NAT_DONE;
 }
 
 struct nlattr;
diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h
index cd85b3b..e505358 100644
--- a/include/net/netfilter/nf_tproxy_core.h
+++ b/include/net/netfilter/nf_tproxy_core.h
@@ -201,18 +201,8 @@
 }
 #endif
 
-static inline void
-nf_tproxy_put_sock(struct sock *sk)
-{
-	/* TIME_WAIT inet sockets have to be handled differently */
-	if ((sk->sk_protocol == IPPROTO_TCP) && (sk->sk_state == TCP_TIME_WAIT))
-		inet_twsk_put(inet_twsk(sk));
-	else
-		sock_put(sk);
-}
-
 /* assign a socket to the skb -- consumes sk */
-int
+void
 nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk);
 
 #endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 373f1a9..8a3906a 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -856,18 +856,27 @@
 #define NLA_PUT_BE16(skb, attrtype, value) \
 	NLA_PUT_TYPE(skb, __be16, attrtype, value)
 
+#define NLA_PUT_NET16(skb, attrtype, value) \
+	NLA_PUT_BE16(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
 #define NLA_PUT_U32(skb, attrtype, value) \
 	NLA_PUT_TYPE(skb, u32, attrtype, value)
 
 #define NLA_PUT_BE32(skb, attrtype, value) \
 	NLA_PUT_TYPE(skb, __be32, attrtype, value)
 
+#define NLA_PUT_NET32(skb, attrtype, value) \
+	NLA_PUT_BE32(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
 #define NLA_PUT_U64(skb, attrtype, value) \
 	NLA_PUT_TYPE(skb, u64, attrtype, value)
 
 #define NLA_PUT_BE64(skb, attrtype, value) \
 	NLA_PUT_TYPE(skb, __be64, attrtype, value)
 
+#define NLA_PUT_NET64(skb, attrtype, value) \
+	NLA_PUT_BE64(skb, attrtype | NLA_F_NET_BYTEORDER, value)
+
 #define NLA_PUT_STRING(skb, attrtype, value) \
 	NLA_PUT(skb, attrtype, strlen(value) + 1, value)
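
The NLA_PUT_NET* helpers only OR NLA_F_NET_BYTEORDER into the attribute type; usage mirrors the existing NLA_PUT_BE* macros. A hedged sketch, with the attribute type supplied by the caller:

static int example_put_port(struct sk_buff *skb, int attrtype, __be16 port)
{
	NLA_PUT_NET16(skb, attrtype, port);
	return 0;

nla_put_failure:			/* NLA_PUT_* macros jump here on overflow */
	return -EMSGSIZE;
}
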
 
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index d4958d4..341eb08 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -21,15 +21,15 @@
 	int			sysctl_events;
 	unsigned int		sysctl_events_retry_timeout;
 	int			sysctl_acct;
+	int			sysctl_tstamp;
 	int			sysctl_checksum;
 	unsigned int		sysctl_log_invalid; /* Log invalid packets */
 #ifdef CONFIG_SYSCTL
 	struct ctl_table_header	*sysctl_header;
 	struct ctl_table_header	*acct_sysctl_header;
+	struct ctl_table_header	*tstamp_sysctl_header;
 	struct ctl_table_header	*event_sysctl_header;
 #endif
-	int			hash_vmalloc;
-	int			expect_vmalloc;
 	char			*slabname;
 };
 #endif
diff --git a/include/net/netns/ip_vs.h b/include/net/netns/ip_vs.h
new file mode 100644
index 0000000..259ebac
--- /dev/null
+++ b/include/net/netns/ip_vs.h
@@ -0,0 +1,143 @@
+/*
+ *  IP Virtual Server
+ *  Data structure for network namespace
+ *
+ */
+
+#ifndef IP_VS_H_
+#define IP_VS_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/list_nulls.h>
+#include <linux/ip_vs.h>
+#include <asm/atomic.h>
+#include <linux/in.h>
+
+struct ip_vs_stats;
+struct ip_vs_sync_buff;
+struct ctl_table_header;
+
+struct netns_ipvs {
+	int			gen;		/* Generation */
+	/*
+	 *	Hash table: for real service lookups
+	 */
+	#define IP_VS_RTAB_BITS 4
+	#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
+	#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
+
+	struct list_head	rs_table[IP_VS_RTAB_SIZE];
+	/* ip_vs_app */
+	struct list_head	app_list;
+	struct mutex		app_mutex;
+	struct lock_class_key	app_key;	/* mutex debugging */
+
+	/* ip_vs_proto */
+	#define IP_VS_PROTO_TAB_SIZE	32	/* must be power of 2 */
+	struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
+	/* ip_vs_proto_tcp */
+#ifdef CONFIG_IP_VS_PROTO_TCP
+	#define	TCP_APP_TAB_BITS	4
+	#define	TCP_APP_TAB_SIZE	(1 << TCP_APP_TAB_BITS)
+	#define	TCP_APP_TAB_MASK	(TCP_APP_TAB_SIZE - 1)
+	struct list_head	tcp_apps[TCP_APP_TAB_SIZE];
+	spinlock_t		tcp_app_lock;
+#endif
+	/* ip_vs_proto_udp */
+#ifdef CONFIG_IP_VS_PROTO_UDP
+	#define	UDP_APP_TAB_BITS	4
+	#define	UDP_APP_TAB_SIZE	(1 << UDP_APP_TAB_BITS)
+	#define	UDP_APP_TAB_MASK	(UDP_APP_TAB_SIZE - 1)
+	struct list_head	udp_apps[UDP_APP_TAB_SIZE];
+	spinlock_t		udp_app_lock;
+#endif
+	/* ip_vs_proto_sctp */
+#ifdef CONFIG_IP_VS_PROTO_SCTP
+	#define SCTP_APP_TAB_BITS	4
+	#define SCTP_APP_TAB_SIZE	(1 << SCTP_APP_TAB_BITS)
+	#define SCTP_APP_TAB_MASK	(SCTP_APP_TAB_SIZE - 1)
+	/* Hash table for SCTP application incarnations	 */
+	struct list_head	sctp_apps[SCTP_APP_TAB_SIZE];
+	spinlock_t		sctp_app_lock;
+#endif
+	/* ip_vs_conn */
+	atomic_t		conn_count;      /*  connection counter */
+
+	/* ip_vs_ctl */
+	struct ip_vs_stats		*tot_stats;  /* Statistics & est. */
+	struct ip_vs_cpu_stats __percpu *cpustats;   /* Stats per cpu */
+	seqcount_t			*ustats_seq; /* u64 read retry */
+
+	int			num_services;    /* no of virtual services */
+	/* 1/rate drop and drop-entry variables */
+	struct delayed_work	defense_work;   /* Work handler */
+	int			drop_rate;
+	int			drop_counter;
+	atomic_t		dropentry;
+	/* locks in ctl.c */
+	spinlock_t		dropentry_lock;  /* drop entry handling */
+	spinlock_t		droppacket_lock; /* drop packet handling */
+	spinlock_t		securetcp_lock;  /* state and timeout tables */
+	rwlock_t		rs_lock;         /* real services table */
+	/* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
+	struct lock_class_key	ctl_key;	/* ctl_mutex debugging */
+	/* Trash for destinations */
+	struct list_head	dest_trash;
+	/* Service counters */
+	atomic_t		ftpsvc_counter;
+	atomic_t		nullsvc_counter;
+
+	/* sys-ctl struct */
+	struct ctl_table_header	*sysctl_hdr;
+	struct ctl_table	*sysctl_tbl;
+	/* sysctl variables */
+	int			sysctl_amemthresh;
+	int			sysctl_am_droprate;
+	int			sysctl_drop_entry;
+	int			sysctl_drop_packet;
+	int			sysctl_secure_tcp;
+#ifdef CONFIG_IP_VS_NFCT
+	int			sysctl_conntrack;
+#endif
+	int			sysctl_snat_reroute;
+	int			sysctl_sync_ver;
+	int			sysctl_cache_bypass;
+	int			sysctl_expire_nodest_conn;
+	int			sysctl_expire_quiescent_template;
+	int			sysctl_sync_threshold[2];
+	int			sysctl_nat_icmp_send;
+
+	/* ip_vs_lblc */
+	int			sysctl_lblc_expiration;
+	struct ctl_table_header	*lblc_ctl_header;
+	struct ctl_table	*lblc_ctl_table;
+	/* ip_vs_lblcr */
+	int			sysctl_lblcr_expiration;
+	struct ctl_table_header	*lblcr_ctl_header;
+	struct ctl_table	*lblcr_ctl_table;
+	/* ip_vs_est */
+	struct list_head	est_list;	/* estimator list */
+	spinlock_t		est_lock;
+	struct timer_list	est_timer;	/* Estimation timer */
+	/* ip_vs_sync */
+	struct list_head	sync_queue;
+	spinlock_t		sync_lock;
+	struct ip_vs_sync_buff  *sync_buff;
+	spinlock_t		sync_buff_lock;
+	struct sockaddr_in	sync_mcast_addr;
+	struct task_struct	*master_thread;
+	struct task_struct	*backup_thread;
+	int			send_mesg_maxlen;
+	int			recv_mesg_maxlen;
+	volatile int		sync_state;
+	volatile int		master_syncid;
+	volatile int		backup_syncid;
+	/* multicast interface name */
+	char			master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+	char			backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
+	/* net name space ptr */
+	struct net		*net;            /* Needed by timer routines */
+};
+
+#endif /* IP_VS_H_ */
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index d68c3f1..e2e2ef5 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -43,7 +43,6 @@
 	struct xt_table		*nat_table;
 	struct hlist_head	*nat_bysource;
 	unsigned int		nat_htable_size;
-	int			nat_vmalloced;
 #endif
 
 	int sysctl_icmp_echo_ignore_all;
diff --git a/include/net/protocol.h b/include/net/protocol.h
index dc07495..6f7eb80 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -38,7 +38,7 @@
 	void			(*err_handler)(struct sk_buff *skb, u32 info);
 	int			(*gso_send_check)(struct sk_buff *skb);
 	struct sk_buff	       *(*gso_segment)(struct sk_buff *skb,
-					       int features);
+					       u32 features);
 	struct sk_buff	      **(*gro_receive)(struct sk_buff **head,
 					       struct sk_buff *skb);
 	int			(*gro_complete)(struct sk_buff *skb);
@@ -57,7 +57,7 @@
 
 	int	(*gso_send_check)(struct sk_buff *skb);
 	struct sk_buff *(*gso_segment)(struct sk_buff *skb,
-				       int features);
+				       u32 features);
 	struct sk_buff **(*gro_receive)(struct sk_buff **head,
 					struct sk_buff *skb);
 	int	(*gro_complete)(struct sk_buff *skb);
diff --git a/include/net/route.h b/include/net/route.h
index 93e10c4..bf790c1 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -49,6 +49,7 @@
 
 struct fib_nh;
 struct inet_peer;
+struct fib_info;
 struct rtable {
 	struct dst_entry	dst;
 
@@ -68,7 +69,9 @@
 
 	/* Miscellaneous cached information */
 	__be32			rt_spec_dst; /* RFC1122 specific destination */
+	u32			rt_peer_genid;
 	struct inet_peer	*peer; /* long-living peer info */
+	struct fib_info		*fi; /* for client ref to shared metrics */
 };
 
 static inline bool rt_is_input_route(struct rtable *rt)
@@ -180,6 +183,8 @@
 
 	if (inet_sk(sk)->transparent)
 		fl.flags |= FLOWI_FLAG_ANYSRC;
+	if (protocol == IPPROTO_TCP)
+		fl.flags |= FLOWI_FLAG_PRECOW_METRICS;
 
 	if (!dst || !src) {
 		err = __ip_route_output_key(net, rp, &fl);
@@ -207,6 +212,8 @@
 		fl.proto = protocol;
 		if (inet_sk(sk)->transparent)
 			fl.flags |= FLOWI_FLAG_ANYSRC;
+		if (protocol == IPPROTO_TCP)
+			fl.flags |= FLOWI_FLAG_PRECOW_METRICS;
 		ip_rt_put(*rp);
 		*rp = NULL;
 		security_sk_classify_flow(sk, &fl);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e9eee99..16626a0 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -31,10 +31,12 @@
  * following bits are only changed while qdisc lock is held
  */
 enum qdisc___state_t {
-	__QDISC___STATE_RUNNING,
+	__QDISC___STATE_RUNNING = 1,
+	__QDISC___STATE_THROTTLED = 2,
 };
 
 struct qdisc_size_table {
+	struct rcu_head		rcu;
 	struct list_head	list;
 	struct tc_sizespec	szopts;
 	int			refcnt;
@@ -46,14 +48,13 @@
 	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
 	unsigned		flags;
 #define TCQ_F_BUILTIN		1
-#define TCQ_F_THROTTLED		2
-#define TCQ_F_INGRESS		4
-#define TCQ_F_CAN_BYPASS	8
-#define TCQ_F_MQROOT		16
+#define TCQ_F_INGRESS		2
+#define TCQ_F_CAN_BYPASS	4
+#define TCQ_F_MQROOT		8
 #define TCQ_F_WARN_NONWC	(1 << 16)
 	int			padded;
 	struct Qdisc_ops	*ops;
-	struct qdisc_size_table	*stab;
+	struct qdisc_size_table	__rcu *stab;
 	struct list_head	list;
 	u32			handle;
 	u32			parent;
@@ -78,25 +79,43 @@
 	unsigned long		state;
 	struct sk_buff_head	q;
 	struct gnet_stats_basic_packed bstats;
-	unsigned long		__state;
+	unsigned int		__state;
 	struct gnet_stats_queue	qstats;
 	struct rcu_head		rcu_head;
 	spinlock_t		busylock;
 };
 
-static inline bool qdisc_is_running(struct Qdisc *qdisc)
+static inline bool qdisc_is_running(const struct Qdisc *qdisc)
 {
-	return test_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+	return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
 }
 
 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
-	return !__test_and_set_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+	if (qdisc_is_running(qdisc))
+		return false;
+	qdisc->__state |= __QDISC___STATE_RUNNING;
+	return true;
 }
 
 static inline void qdisc_run_end(struct Qdisc *qdisc)
 {
-	__clear_bit(__QDISC___STATE_RUNNING, &qdisc->__state);
+	qdisc->__state &= ~__QDISC___STATE_RUNNING;
+}
+
+static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
+{
+	return (qdisc->__state & __QDISC___STATE_THROTTLED) ? true : false;
+}
+
+static inline void qdisc_throttled(struct Qdisc *qdisc)
+{
+	qdisc->__state |= __QDISC___STATE_THROTTLED;
+}
+
+static inline void qdisc_unthrottled(struct Qdisc *qdisc)
+{
+	qdisc->__state &= ~__QDISC___STATE_THROTTLED;
 }
 
 struct Qdisc_class_ops {
@@ -331,8 +350,8 @@
 				 struct Qdisc_ops *ops);
 extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
 				       struct Qdisc_ops *ops, u32 parentid);
-extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
-				   struct qdisc_size_table *stab);
+extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+				      const struct qdisc_size_table *stab);
 extern void tcf_destroy(struct tcf_proto *tp);
 extern void tcf_destroy_chain(struct tcf_proto **fl);
 
@@ -411,12 +430,20 @@
 #define net_xmit_drop_count(e)	(1)
 #endif
 
-static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
+					   const struct Qdisc *sch)
 {
 #ifdef CONFIG_NET_SCHED
-	if (sch->stab)
-		qdisc_calculate_pkt_len(skb, sch->stab);
+	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);
+
+	if (stab)
+		__qdisc_calculate_pkt_len(skb, stab);
 #endif
+}
+
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	qdisc_calculate_pkt_len(skb, sch);
 	return sch->enqueue(skb, sch);
 }
 
@@ -445,7 +472,6 @@
 {
 	__skb_queue_tail(list, skb);
 	sch->qstats.backlog += qdisc_pkt_len(skb);
-	qdisc_bstats_update(sch, skb);
 
 	return NET_XMIT_SUCCESS;
 }
@@ -460,8 +486,10 @@
 {
 	struct sk_buff *skb = __skb_dequeue(list);
 
-	if (likely(skb != NULL))
+	if (likely(skb != NULL)) {
 		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		qdisc_bstats_update(sch, skb);
+	}
 
 	return skb;
 }
@@ -474,10 +502,11 @@
 static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
 					      struct sk_buff_head *list)
 {
-	struct sk_buff *skb = __qdisc_dequeue_head(sch, list);
+	struct sk_buff *skb = __skb_dequeue(list);
 
 	if (likely(skb != NULL)) {
 		unsigned int len = qdisc_pkt_len(skb);
+		sch->qstats.backlog -= len;
 		kfree_skb(skb);
 		return len;
 	}
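
With TCQ_F_THROTTLED removed from qdisc->flags, throttling is now tracked in __state through the new helpers. A sketch of what a watchdog-style expiry path might look like after the conversion (the function itself is illustrative):

static void example_watchdog_expiry(struct Qdisc *qdisc)
{
	qdisc_unthrottled(qdisc);	/* was: qdisc->flags &= ~TCQ_F_THROTTLED */
	__netif_schedule(qdisc);
}
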
diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h
index 2a128c8..e73ebda 100644
--- a/include/net/sctp/user.h
+++ b/include/net/sctp/user.h
@@ -78,6 +78,7 @@
 #define SCTP_GET_PEER_ADDR_INFO	15
 #define SCTP_DELAYED_ACK_TIME	16
 #define SCTP_DELAYED_ACK SCTP_DELAYED_ACK_TIME
+#define SCTP_DELAYED_SACK SCTP_DELAYED_ACK_TIME
 #define SCTP_CONTEXT	17
 #define SCTP_FRAGMENT_INTERLEAVE	18
 #define SCTP_PARTIAL_DELIVERY_POINT	19 /* Set/Get partial delivery point */
diff --git a/include/net/sock.h b/include/net/sock.h
index d884d26..da0534d 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -281,7 +281,7 @@
 	int			sk_rcvbuf;
 
 	struct sk_filter __rcu	*sk_filter;
-	struct socket_wq	*sk_wq;
+	struct socket_wq __rcu	*sk_wq;
 
 #ifdef CONFIG_NET_DMA
 	struct sk_buff_head	sk_async_wait_queue;
@@ -753,6 +753,8 @@
 					int level,
 					int optname, char __user *optval,
 					int __user *option);
+	int			(*compat_ioctl)(struct sock *sk,
+					unsigned int cmd, unsigned long arg);
 #endif
 	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
 					   struct msghdr *msg, size_t len);
@@ -1189,7 +1191,7 @@
 static inline void sk_filter_release(struct sk_filter *fp)
 {
 	if (atomic_dec_and_test(&fp->refcnt))
-		call_rcu_bh(&fp->rcu, sk_filter_release_rcu);
+		call_rcu(&fp->rcu, sk_filter_release_rcu);
 }
 
 static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
@@ -1264,7 +1266,8 @@
 
 static inline wait_queue_head_t *sk_sleep(struct sock *sk)
 {
-	return &sk->sk_wq->wait;
+	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
+	return &rcu_dereference_raw(sk->sk_wq)->wait;
 }
 /* Detach socket from process context.
  * Announce socket dead, detach it from wait queue and inode.
@@ -1285,7 +1288,7 @@
 static inline void sock_graft(struct sock *sk, struct socket *parent)
 {
 	write_lock_bh(&sk->sk_callback_lock);
-	rcu_assign_pointer(sk->sk_wq, parent->wq);
+	sk->sk_wq = parent->wq;
 	parent->sk = sk;
 	sk_set_socket(sk, parent);
 	security_sock_graft(sk, parent);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 38509f0..cda30ea 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -196,6 +196,9 @@
 /* TCP thin-stream limits */
 #define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
 
+/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
+#define TCP_INIT_CWND		10
+
 extern struct inet_timewait_death_row tcp_death_row;
 
 /* sysctl variables for tcp */
@@ -799,15 +802,6 @@
 /* Use define here intentionally to get WARN_ON location shown at the caller */
 #define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
 
-/*
- * Convert RFC 3390 larger initial window into an equivalent number of packets.
- * This is based on the numbers specified in RFC 5681, 3.1.
- */
-static inline u32 rfc3390_bytes_to_packets(const u32 smss)
-{
-	return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
-}
-
 extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
 extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
 
@@ -1074,8 +1068,6 @@
 	return 1;
 }
 
-#define TCP_CHECK_TIMER(sk) do { } while (0)
-
 static inline void tcp_mib_init(struct net *net)
 {
 	/* See RFC 2012 */
@@ -1404,7 +1396,7 @@
 extern void tcp_v4_destroy_sock(struct sock *sk);
 
 extern int tcp_v4_gso_send_check(struct sk_buff *skb);
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
+extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features);
 extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
 					struct sk_buff *skb);
 extern struct sk_buff **tcp4_gro_receive(struct sk_buff **head,
diff --git a/include/net/udp.h b/include/net/udp.h
index bb967dd..e82f3a8 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -245,5 +245,5 @@
 extern void udp_init(void);
 
 extern int udp4_ufo_send_check(struct sk_buff *skb);
-extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features);
+extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features);
 #endif	/* _UDP_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index b9f385d..1f6e8a0 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -36,6 +36,7 @@
 #define XFRM_PROTO_ROUTING	IPPROTO_ROUTING
 #define XFRM_PROTO_DSTOPTS	IPPROTO_DSTOPTS
 
+#define XFRM_ALIGN4(len)	(((len) + 3) & ~3)
 #define XFRM_ALIGN8(len)	(((len) + 7) & ~7)
 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
 	MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 648d233..b76d400 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -9,6 +9,7 @@
 #define _SCSI_SCSI_H
 
 #include <linux/types.h>
+#include <linux/scatterlist.h>
 
 struct scsi_cmnd;
 
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 07fdfb6..0828b6c8 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -8,7 +8,6 @@
 #include <scsi/scsi_cmnd.h>
 #include <net/sock.h>
 #include <net/tcp.h>
-#include "target_core_mib.h"
 
 #define TARGET_CORE_MOD_VERSION		"v4.0.0-rc6"
 #define SHUTDOWN_SIGS	(sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
@@ -195,6 +194,21 @@
 	SAM_TASK_ATTR_EMULATED
 } t10_task_attr_index_t;
 
+/*
+ * Used for target SCSI statistics
+ */
+typedef enum {
+	SCSI_INST_INDEX,
+	SCSI_DEVICE_INDEX,
+	SCSI_AUTH_INTR_INDEX,
+	SCSI_INDEX_TYPE_MAX
+} scsi_index_t;
+
+struct scsi_index_table {
+	spinlock_t	lock;
+	u32		scsi_mib_index[SCSI_INDEX_TYPE_MAX];
+} ____cacheline_aligned;
+
 struct se_cmd;
 
 struct t10_alua {
@@ -578,8 +592,6 @@
 	spinlock_t		stats_lock;
 	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
 	atomic_t		acl_pr_ref_count;
-	/* Used for MIB access */
-	atomic_t		mib_ref_count;
 	struct se_dev_entry	*device_list;
 	struct se_session	*nacl_sess;
 	struct se_portal_group *se_tpg;
@@ -595,8 +607,6 @@
 } ____cacheline_aligned;
 
 struct se_session {
-	/* Used for MIB access */
-	atomic_t		mib_ref_count;
 	u64			sess_bin_isid;
 	struct se_node_acl	*se_node_acl;
 	struct se_portal_group *se_tpg;
@@ -806,7 +816,6 @@
 	/* Virtual iSCSI devices attached. */
 	u32			dev_count;
 	u32			hba_index;
-	atomic_t		dev_mib_access_count;
 	atomic_t		load_balance_queue;
 	atomic_t		left_queue_depth;
 	/* Maximum queue depth the HBA can handle. */
@@ -845,6 +854,12 @@
 
 #define SE_LUN(c)		((struct se_lun *)(c)->se_lun)
 
+struct scsi_port_stats {
+	u64	cmd_pdus;
+	u64	tx_data_octets;
+	u64	rx_data_octets;
+} ____cacheline_aligned;
+
 struct se_port {
 	/* RELATIVE TARGET PORT IDENTIFIER */
 	u16		sep_rtpi;
@@ -867,6 +882,7 @@
 } ____cacheline_aligned;
 
 struct se_tpg_np {
+	struct se_portal_group *tpg_np_parent;
 	struct config_group	tpg_np_group;
 } ____cacheline_aligned;
 
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 66f44e5..2469405 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -111,6 +111,8 @@
 
 extern int init_se_global(void);
 extern void release_se_global(void);
+extern void init_scsi_index_table(void);
+extern u32 scsi_get_new_index(scsi_index_t);
 extern void transport_init_queue_obj(struct se_queue_obj *);
 extern int transport_subsystem_check_init(void);
 extern int transport_subsystem_register(struct se_subsystem_api *);
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index e16610c..3e68366 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -446,14 +446,16 @@
  *	.reg			= ftrace_event_reg,
  * };
  *
- * static struct ftrace_event_call __used
- * __attribute__((__aligned__(4)))
- * __attribute__((section("_ftrace_events"))) event_<call> = {
+ * static struct ftrace_event_call event_<call> = {
  *	.name			= "<call>",
  *	.class			= event_class_<template>,
  *	.event			= &ftrace_event_type_<call>,
  *	.print_fmt		= print_fmt_<call>,
  * };
+ * // it's only safe to use pointers when doing linker tricks to
+ * // create an array.
+ * static struct ftrace_event_call __used
+ * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
  *
  */
 
@@ -579,28 +581,28 @@
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, call, proto, args)			\
 									\
-static struct ftrace_event_call __used					\
-__attribute__((__aligned__(4)))						\
-__attribute__((section("_ftrace_events"))) event_##call = {		\
+static struct ftrace_event_call __used event_##call = {			\
 	.name			= #call,				\
 	.class			= &event_class_##template,		\
 	.event.funcs		= &ftrace_event_type_funcs_##template,	\
 	.print_fmt		= print_fmt_##template,			\
-};
+};									\
+static struct ftrace_event_call __used					\
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 
 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
 									\
 static const char print_fmt_##call[] = print;				\
 									\
-static struct ftrace_event_call __used					\
-__attribute__((__aligned__(4)))						\
-__attribute__((section("_ftrace_events"))) event_##call = {		\
+static struct ftrace_event_call __used event_##call = {			\
 	.name			= #call,				\
 	.class			= &event_class_##template,		\
 	.event.funcs		= &ftrace_event_type_funcs_##call,	\
 	.print_fmt		= print_fmt_##call,			\
-}
+};									\
+static struct ftrace_event_call __used					\
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 
 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
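
Storing pointers in the "_ftrace_events" section lets the linker emit a contiguous array of pointers that the tracing core can walk; roughly along these lines (the boundary symbols and loop below are shown for illustration only):

extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];

static void example_walk_events(void)
{
	struct ftrace_event_call **call;

	for (call = __start_ftrace_events; call < __stop_ftrace_events; call++) {
		/* each section entry is now a pointer to the event
		 * structure defined next to its users */
	}
}
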
 
diff --git a/init/Kconfig b/init/Kconfig
index 4e33790..be788c0 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -745,8 +745,8 @@
 endif # CGROUPS
 
 menuconfig NAMESPACES
-	bool "Namespaces support" if EMBEDDED
-	default !EMBEDDED
+	bool "Namespaces support" if EXPERT
+	default !EXPERT
 	help
 	  Provides the way to make tasks work with different objects using
 	  the same id. For example same IPC id may refer to different objects
@@ -899,23 +899,31 @@
 config ANON_INODES
 	bool
 
-menuconfig EMBEDDED
-	bool "Configure standard kernel features (for small systems)"
+menuconfig EXPERT
+	bool "Configure standard kernel features (expert users)"
 	help
 	  This option allows certain base kernel options and settings
           to be disabled or tweaked. This is for specialized
           environments which can tolerate a "non-standard" kernel.
           Only use this if you really know what you are doing.
 
+config EMBEDDED
+	bool "Embedded system"
+	select EXPERT
+	help
+	  This option should be enabled if compiling the kernel for
+	  an embedded system so certain expert options are available
+	  for configuration.
+
 config UID16
-	bool "Enable 16-bit UID system calls" if EMBEDDED
+	bool "Enable 16-bit UID system calls" if EXPERT
 	depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION)
 	default y
 	help
 	  This enables the legacy 16-bit UID syscall wrappers.
 
 config SYSCTL_SYSCALL
-	bool "Sysctl syscall support" if EMBEDDED
+	bool "Sysctl syscall support" if EXPERT
 	depends on PROC_SYSCTL
 	default y
 	select SYSCTL
@@ -932,7 +940,7 @@
 	  If unsure say Y here.
 
 config KALLSYMS
-	 bool "Load all symbols for debugging/ksymoops" if EMBEDDED
+	 bool "Load all symbols for debugging/ksymoops" if EXPERT
 	 default y
 	 help
 	   Say Y here to let the kernel print out symbolic crash information and
@@ -963,7 +971,7 @@
 
 
 config HOTPLUG
-	bool "Support for hot-pluggable devices" if EMBEDDED
+	bool "Support for hot-pluggable devices" if EXPERT
 	default y
 	help
 	  This option is provided for the case where no hotplug or uevent
@@ -973,7 +981,7 @@
 
 config PRINTK
 	default y
-	bool "Enable support for printk" if EMBEDDED
+	bool "Enable support for printk" if EXPERT
 	help
 	  This option enables normal printk support. Removing it
 	  eliminates most of the message strings from the kernel image
@@ -982,7 +990,7 @@
 	  strongly discouraged.
 
 config BUG
-	bool "BUG() support" if EMBEDDED
+	bool "BUG() support" if EXPERT
 	default y
 	help
           Disabling this option eliminates support for BUG and WARN, reducing
@@ -993,12 +1001,12 @@
 
 config ELF_CORE
 	default y
-	bool "Enable ELF core dumps" if EMBEDDED
+	bool "Enable ELF core dumps" if EXPERT
 	help
 	  Enable support for generating core dumps. Disabling saves about 4k.
 
 config PCSPKR_PLATFORM
-	bool "Enable PC-Speaker support" if EMBEDDED
+	bool "Enable PC-Speaker support" if EXPERT
 	depends on ALPHA || X86 || MIPS || PPC_PREP || PPC_CHRP || PPC_PSERIES
 	default y
 	help
@@ -1007,14 +1015,14 @@
 
 config BASE_FULL
 	default y
-	bool "Enable full-sized data structures for core" if EMBEDDED
+	bool "Enable full-sized data structures for core" if EXPERT
 	help
 	  Disabling this option reduces the size of miscellaneous core
 	  kernel data structures. This saves memory on small machines,
 	  but may reduce performance.
 
 config FUTEX
-	bool "Enable futex support" if EMBEDDED
+	bool "Enable futex support" if EXPERT
 	default y
 	select RT_MUTEXES
 	help
@@ -1023,7 +1031,7 @@
 	  run glibc-based applications correctly.
 
 config EPOLL
-	bool "Enable eventpoll support" if EMBEDDED
+	bool "Enable eventpoll support" if EXPERT
 	default y
 	select ANON_INODES
 	help
@@ -1031,7 +1039,7 @@
 	  support for epoll family of system calls.
 
 config SIGNALFD
-	bool "Enable signalfd() system call" if EMBEDDED
+	bool "Enable signalfd() system call" if EXPERT
 	select ANON_INODES
 	default y
 	help
@@ -1041,7 +1049,7 @@
 	  If unsure, say Y.
 
 config TIMERFD
-	bool "Enable timerfd() system call" if EMBEDDED
+	bool "Enable timerfd() system call" if EXPERT
 	select ANON_INODES
 	default y
 	help
@@ -1051,7 +1059,7 @@
 	  If unsure, say Y.
 
 config EVENTFD
-	bool "Enable eventfd() system call" if EMBEDDED
+	bool "Enable eventfd() system call" if EXPERT
 	select ANON_INODES
 	default y
 	help
@@ -1061,7 +1069,7 @@
 	  If unsure, say Y.
 
 config SHMEM
-	bool "Use full shmem filesystem" if EMBEDDED
+	bool "Use full shmem filesystem" if EXPERT
 	default y
 	depends on MMU
 	help
@@ -1072,7 +1080,7 @@
 	  which may be appropriate on small systems without swap.
 
 config AIO
-	bool "Enable AIO support" if EMBEDDED
+	bool "Enable AIO support" if EXPERT
 	default y
 	help
 	  This option enables POSIX asynchronous I/O which may by used
@@ -1149,16 +1157,16 @@
 
 config VM_EVENT_COUNTERS
 	default y
-	bool "Enable VM event counters for /proc/vmstat" if EMBEDDED
+	bool "Enable VM event counters for /proc/vmstat" if EXPERT
 	help
 	  VM event counters are needed for event counts to be shown.
 	  This option allows the disabling of the VM event counters
-	  on EMBEDDED systems.  /proc/vmstat will only show page counts
+	  on EXPERT systems.  /proc/vmstat will only show page counts
 	  if VM event counters are disabled.
 
 config PCI_QUIRKS
 	default y
-	bool "Enable PCI quirk workarounds" if EMBEDDED
+	bool "Enable PCI quirk workarounds" if EXPERT
 	depends on PCI
 	help
 	  This enables workarounds for various PCI chipset
@@ -1167,7 +1175,7 @@
 
 config SLUB_DEBUG
 	default y
-	bool "Enable SLUB debugging support" if EMBEDDED
+	bool "Enable SLUB debugging support" if EXPERT
 	depends on SLUB && SYSFS
 	help
 	  SLUB has extensive debug support features. Disabling these can
@@ -1211,7 +1219,7 @@
 	   a slab allocator.
 
 config SLOB
-	depends on EMBEDDED
+	depends on EXPERT
 	bool "SLOB (Simple Allocator)"
 	help
 	   SLOB replaces the stock allocator with a drastically simpler
@@ -1222,7 +1230,7 @@
 
 config MMAP_ALLOW_UNINITIALIZED
 	bool "Allow mmapped anonymous memory to be uninitialized"
-	depends on EMBEDDED && !MMU
+	depends on EXPERT && !MMU
 	default n
 	help
 	  Normally, and according to the Linux spec, anonymous memory obtained
diff --git a/init/calibrate.c b/init/calibrate.c
index 6eb48e5..24fe022 100644
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -66,7 +66,7 @@
 		pre_start = 0;
 		read_current_timer(&start);
 		start_jiffies = jiffies;
-		while (jiffies <= (start_jiffies + 1)) {
+		while (time_before_eq(jiffies, start_jiffies + 1)) {
 			pre_start = start;
 			read_current_timer(&start);
 		}
@@ -74,8 +74,8 @@
 
 		pre_end = 0;
 		end = post_start;
-		while (jiffies <=
-		       (start_jiffies + 1 + DELAY_CALIBRATION_TICKS)) {
+		while (time_before_eq(jiffies, start_jiffies + 1 +
+					       DELAY_CALIBRATION_TICKS)) {
 			pre_end = end;
 			read_current_timer(&end);
 		}
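
time_before_eq() is wrap-safe where the plain "<=" it replaces is not: the jiffies helpers boil down to a signed difference, roughly as paraphrased below from linux/jiffies.h (typecheck wrappers omitted):

#define example_time_after_eq(a, b)	((long)((a) - (b)) >= 0)
#define example_time_before_eq(a, b)	example_time_after_eq(b, a)
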
diff --git a/init/main.c b/init/main.c
index 00799c1..33c37c3 100644
--- a/init/main.c
+++ b/init/main.c
@@ -96,6 +96,15 @@
 extern void tc_init(void);
 #endif
 
+/*
+ * Debug helper: via this flag we know that we are in 'early bootup code'
+ * where only the boot processor is running with IRQs disabled.  This means
+ * two things: IRQs must not be enabled before the flag is cleared, and some
+ * operations that are not allowed with IRQs disabled are allowed while the
+ * flag is set.
+ */
+bool early_boot_irqs_disabled __read_mostly;
+
 enum system_states system_state __read_mostly;
 EXPORT_SYMBOL(system_state);
 
@@ -554,7 +563,7 @@
 	cgroup_init_early();
 
 	local_irq_disable();
-	early_boot_irqs_off();
+	early_boot_irqs_disabled = true;
 
 /*
  * Interrupts are still disabled. Do necessary setups, then
@@ -621,7 +630,7 @@
 	if (!irqs_disabled())
 		printk(KERN_CRIT "start_kernel(): bug: interrupts were "
 				 "enabled early\n");
-	early_boot_irqs_on();
+	early_boot_irqs_disabled = false;
 	local_irq_enable();
 
 	/* Interrupts are enabled now so all GFP allocations are safe. */
diff --git a/kernel/audit.c b/kernel/audit.c
index e495624..162e88e 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -74,6 +74,8 @@
 int		audit_enabled;
 int		audit_ever_enabled;
 
+EXPORT_SYMBOL_GPL(audit_enabled);
+
 /* Default state when kernel boots without any parameters. */
 static int	audit_default;
 
diff --git a/kernel/capability.c b/kernel/capability.c
index 2f05303..9e9385f 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -306,7 +306,7 @@
 		BUG();
 	}
 
-	if (security_capable(cap) == 0) {
+	if (security_capable(current_cred(), cap) == 0) {
 		current->flags |= PF_SUPERPRIV;
 		return 1;
 	}
diff --git a/kernel/cred.c b/kernel/cred.c
index 6a1aa00..3a9d6dd 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -252,13 +252,13 @@
 #endif
 
 	atomic_set(&new->usage, 1);
+#ifdef CONFIG_DEBUG_CREDENTIALS
+	new->magic = CRED_MAGIC;
+#endif
 
 	if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
 		goto error;
 
-#ifdef CONFIG_DEBUG_CREDENTIALS
-	new->magic = CRED_MAGIC;
-#endif
 	return new;
 
 error:
@@ -657,6 +657,8 @@
 	validate_creds(old);
 
 	*new = *old;
+	atomic_set(&new->usage, 1);
+	set_cred_subscribers(new, 0);
 	get_uid(new->user);
 	get_group_info(new->group_info);
 
@@ -674,8 +676,6 @@
 	if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
 		goto error;
 
-	atomic_set(&new->usage, 1);
-	set_cred_subscribers(new, 0);
 	put_cred(old);
 	validate_creds(new);
 	return new;
@@ -748,7 +748,11 @@
 	if (cred->magic != CRED_MAGIC)
 		return true;
 #ifdef CONFIG_SECURITY_SELINUX
-	if (selinux_is_enabled()) {
+	/*
+	 * cred->security == NULL if security_cred_alloc_blank() or
+	 * security_prepare_creds() returned an error.
+	 */
+	if (selinux_is_enabled() && cred->security) {
 		if ((unsigned long) cred->security < PAGE_SIZE)
 			return true;
 		if ((*(u32 *)cred->security & 0xffffff00) ==
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 31d766b..8e42fec 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -9,9 +9,6 @@
 config GENERIC_HARDIRQS
        def_bool y
 
-config GENERIC_HARDIRQS_NO__DO_IRQ
-       def_bool y
-
 # Select this to disable the deprecated stuff
 config GENERIC_HARDIRQS_NO_DEPRECATED
        def_bool n
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index e2347eb..3540a71 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -118,114 +118,3 @@
 
 	return retval;
 }
-
-#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-
-#ifdef CONFIG_ENABLE_WARN_DEPRECATED
-# warning __do_IRQ is deprecated. Please convert to proper flow handlers
-#endif
-
-/**
- * __do_IRQ - original all in one highlevel IRQ handler
- * @irq:	the interrupt number
- *
- * __do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- *
- * This is the original x86 implementation which is used for every
- * interrupt type.
- */
-unsigned int __do_IRQ(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irqaction *action;
-	unsigned int status;
-
-	kstat_incr_irqs_this_cpu(irq, desc);
-
-	if (CHECK_IRQ_PER_CPU(desc->status)) {
-		irqreturn_t action_ret;
-
-		/*
-		 * No locking required for CPU-local interrupts:
-		 */
-		if (desc->irq_data.chip->ack)
-			desc->irq_data.chip->ack(irq);
-		if (likely(!(desc->status & IRQ_DISABLED))) {
-			action_ret = handle_IRQ_event(irq, desc->action);
-			if (!noirqdebug)
-				note_interrupt(irq, desc, action_ret);
-		}
-		desc->irq_data.chip->end(irq);
-		return 1;
-	}
-
-	raw_spin_lock(&desc->lock);
-	if (desc->irq_data.chip->ack)
-		desc->irq_data.chip->ack(irq);
-	/*
-	 * REPLAY is when Linux resends an IRQ that was dropped earlier
-	 * WAITING is used by probe to mark irqs that are being tested
-	 */
-	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
-	status |= IRQ_PENDING; /* we _want_ to handle it */
-
-	/*
-	 * If the IRQ is disabled for whatever reason, we cannot
-	 * use the action we have.
-	 */
-	action = NULL;
-	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
-		action = desc->action;
-		status &= ~IRQ_PENDING; /* we commit to handling */
-		status |= IRQ_INPROGRESS; /* we are handling it */
-	}
-	desc->status = status;
-
-	/*
-	 * If there is no IRQ handler or it was disabled, exit early.
-	 * Since we set PENDING, if another processor is handling
-	 * a different instance of this same irq, the other processor
-	 * will take care of it.
-	 */
-	if (unlikely(!action))
-		goto out;
-
-	/*
-	 * Edge triggered interrupts need to remember
-	 * pending events.
-	 * This applies to any hw interrupts that allow a second
-	 * instance of the same irq to arrive while we are in do_IRQ
-	 * or in the handler. But the code here only handles the _second_
-	 * instance of the irq, not the third or fourth. So it is mostly
-	 * useful for irq hardware that does not mask cleanly in an
-	 * SMP environment.
-	 */
-	for (;;) {
-		irqreturn_t action_ret;
-
-		raw_spin_unlock(&desc->lock);
-
-		action_ret = handle_IRQ_event(irq, action);
-		if (!noirqdebug)
-			note_interrupt(irq, desc, action_ret);
-
-		raw_spin_lock(&desc->lock);
-		if (likely(!(desc->status & IRQ_PENDING)))
-			break;
-		desc->status &= ~IRQ_PENDING;
-	}
-	desc->status &= ~IRQ_INPROGRESS;
-
-out:
-	/*
-	 * The ->end() handler has to deal with interrupts which got
-	 * disabled while the handler was running.
-	 */
-	desc->irq_data.chip->end(irq);
-	raw_spin_unlock(&desc->lock);
-
-	return 1;
-}
-#endif
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0caa59f..0587c5c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -134,6 +134,10 @@
 		irq_set_thread_affinity(desc);
 	}
 #endif
+	if (desc->affinity_notify) {
+		kref_get(&desc->affinity_notify->kref);
+		schedule_work(&desc->affinity_notify->work);
+	}
 	desc->status |= IRQ_AFFINITY_SET;
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
@@ -155,6 +159,79 @@
 }
 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
 
+static void irq_affinity_notify(struct work_struct *work)
+{
+	struct irq_affinity_notify *notify =
+		container_of(work, struct irq_affinity_notify, work);
+	struct irq_desc *desc = irq_to_desc(notify->irq);
+	cpumask_var_t cpumask;
+	unsigned long flags;
+
+	if (!desc)
+		goto out;
+
+	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+		goto out;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	if (desc->status & IRQ_MOVE_PENDING)
+		cpumask_copy(cpumask, desc->pending_mask);
+	else
+#endif
+		cpumask_copy(cpumask, desc->affinity);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	notify->notify(notify, cpumask);
+
+	free_cpumask_var(cpumask);
+out:
+	kref_put(&notify->kref, notify->release);
+}
+
+/**
+ *	irq_set_affinity_notifier - control notification of IRQ affinity changes
+ *	@irq:		Interrupt for which to enable/disable notification
+ *	@notify:	Context for notification, or %NULL to disable
+ *			notification.  Function pointers must be initialised;
+ *			the other fields will be initialised by this function.
+ *
+ *	Must be called in process context.  Notification may only be enabled
+ *	after the IRQ is allocated and must be disabled before the IRQ is
+ *	freed using free_irq().
+ */
+int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_affinity_notify *old_notify;
+	unsigned long flags;
+
+	/* The release function is promised process context */
+	might_sleep();
+
+	if (!desc)
+		return -EINVAL;
+
+	/* Complete initialisation of *notify */
+	if (notify) {
+		notify->irq = irq;
+		kref_init(&notify->kref);
+		INIT_WORK(&notify->work, irq_affinity_notify);
+	}
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	old_notify = desc->affinity_notify;
+	desc->affinity_notify = notify;
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	if (old_notify)
+		kref_put(&old_notify->kref, old_notify->release);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
+
 #ifndef CONFIG_AUTO_IRQ_AFFINITY
 /*
  * Generic version of the affinity autoselector.
@@ -1004,6 +1081,11 @@
 	if (!desc)
 		return;
 
+#ifdef CONFIG_SMP
+	if (WARN_ON(desc->affinity_notify))
+		desc->affinity_notify = NULL;
+#endif
+
 	chip_bus_lock(desc);
 	kfree(__free_irq(irq, dev_id));
 	chip_bus_sync_unlock(desc);
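The irq_set_affinity_notifier() interface added above lets a driver learn, from process context, when an IRQ's affinity changes. A minimal sketch of a consumer follows; the queue structure and function names are hypothetical, and the callback prototypes are inferred from the calls in irq_affinity_notify() above (the real declarations belong in <linux/interrupt.h>, which is not part of this hunk).

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

/* Hypothetical per-queue state embedding the notifier context. */
struct example_queue {
	unsigned int irq;
	struct irq_affinity_notify affinity_notify;
};

static void example_affinity_notify(struct irq_affinity_notify *notify,
				    const cpumask_t *mask)
{
	struct example_queue *q =
		container_of(notify, struct example_queue, affinity_notify);

	/* Called from the schedule_work() path above, so sleeping is allowed. */
	pr_info("irq %u affinity now spans %u CPUs\n",
		q->irq, cpumask_weight(mask));
}

static void example_affinity_release(struct kref *ref)
{
	/* Last reference dropped; nothing dynamic to free in this sketch. */
}

static int example_enable_notify(struct example_queue *q)
{
	/* Only the function pointers need initialising; irq, kref and work
	 * are filled in by irq_set_affinity_notifier() itself. */
	q->affinity_notify.notify = example_affinity_notify;
	q->affinity_notify.release = example_affinity_release;
	return irq_set_affinity_notifier(q->irq, &q->affinity_notify);
}

static void example_disable_notify(struct example_queue *q)
{
	/* Per the kerneldoc above, disable before the IRQ is freed. */
	irq_set_affinity_notifier(q->irq, NULL);
}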
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 1d25419..441fd62 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -56,6 +56,7 @@
 void move_native_irq(int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
+	bool masked;
 
 	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
 		return;
@@ -63,8 +64,15 @@
 	if (unlikely(desc->status & IRQ_DISABLED))
 		return;
 
-	desc->irq_data.chip->irq_mask(&desc->irq_data);
+	/*
+	 * Be careful vs. already masked interrupts. If this is a
+	 * threaded interrupt with ONESHOT set, we can end up with an
+	 * interrupt storm.
+	 */
+	masked = desc->status & IRQ_MASKED;
+	if (!masked)
+		desc->irq_data.chip->irq_mask(&desc->irq_data);
 	move_masked_irq(irq);
-	desc->irq_data.chip->irq_unmask(&desc->irq_data);
+	if (!masked)
+		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
-
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 42ba65d..0d2058d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2292,22 +2292,6 @@
 }
 
 /*
- * Debugging helper: via this flag we know that we are in
- * 'early bootup code', and will warn about any invalid irqs-on event:
- */
-static int early_boot_irqs_enabled;
-
-void early_boot_irqs_off(void)
-{
-	early_boot_irqs_enabled = 0;
-}
-
-void early_boot_irqs_on(void)
-{
-	early_boot_irqs_enabled = 1;
-}
-
-/*
  * Hardirqs will be enabled:
  */
 void trace_hardirqs_on_caller(unsigned long ip)
@@ -2319,7 +2303,7 @@
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
-	if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
+	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
 		return;
 
 	if (unlikely(curr->hardirqs_enabled)) {
diff --git a/kernel/module.c b/kernel/module.c
index 34e00b7..efa290e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2460,9 +2460,9 @@
 #endif
 
 #ifdef CONFIG_TRACEPOINTS
-	mod->tracepoints = section_objs(info, "__tracepoints",
-					sizeof(*mod->tracepoints),
-					&mod->num_tracepoints);
+	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
+					     sizeof(*mod->tracepoints_ptrs),
+					     &mod->num_tracepoints);
 #endif
 #ifdef HAVE_JUMP_LABEL
 	mod->jump_entries = section_objs(info, "__jump_table",
@@ -3393,7 +3393,7 @@
 		   struct modversion_info *ver,
 		   struct kernel_param *kp,
 		   struct kernel_symbol *ks,
-		   struct tracepoint *tp)
+		   struct tracepoint * const *tp)
 {
 }
 EXPORT_SYMBOL(module_layout);
@@ -3407,8 +3407,8 @@
 	mutex_lock(&module_mutex);
 	list_for_each_entry(mod, &modules, list)
 		if (!mod->taints)
-			tracepoint_update_probe_range(mod->tracepoints,
-				mod->tracepoints + mod->num_tracepoints);
+			tracepoint_update_probe_range(mod->tracepoints_ptrs,
+				mod->tracepoints_ptrs + mod->num_tracepoints);
 	mutex_unlock(&module_mutex);
 }
 
@@ -3432,8 +3432,8 @@
 			else if (iter_mod > iter->module)
 				iter->tracepoint = NULL;
 			found = tracepoint_get_iter_range(&iter->tracepoint,
-				iter_mod->tracepoints,
-				iter_mod->tracepoints
+				iter_mod->tracepoints_ptrs,
+				iter_mod->tracepoints_ptrs
 					+ iter_mod->num_tracepoints);
 			if (found) {
 				iter->module = iter_mod;
diff --git a/kernel/params.c b/kernel/params.c
index 08107d1..0da1411 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -719,9 +719,7 @@
 			params[i].ops->free(params[i].arg);
 }
 
-static void __init kernel_add_sysfs_param(const char *name,
-					  struct kernel_param *kparam,
-					  unsigned int name_skip)
+static struct module_kobject * __init locate_module_kobject(const char *name)
 {
 	struct module_kobject *mk;
 	struct kobject *kobj;
@@ -729,10 +727,7 @@
 
 	kobj = kset_find_obj(module_kset, name);
 	if (kobj) {
-		/* We already have one.  Remove params so we can add more. */
 		mk = to_module_kobject(kobj);
-		/* We need to remove it before adding parameters. */
-		sysfs_remove_group(&mk->kobj, &mk->mp->grp);
 	} else {
 		mk = kzalloc(sizeof(struct module_kobject), GFP_KERNEL);
 		BUG_ON(!mk);
@@ -743,15 +738,36 @@
 					   "%s", name);
 		if (err) {
 			kobject_put(&mk->kobj);
-			printk(KERN_ERR "Module '%s' failed add to sysfs, "
-			       "error number %d\n", name, err);
-			printk(KERN_ERR	"The system will be unstable now.\n");
-			return;
+			printk(KERN_ERR
+				"Module '%s' failed add to sysfs, error number %d\n",
+				name, err);
+			printk(KERN_ERR
+				"The system will be unstable now.\n");
+			return NULL;
 		}
-		/* So that exit path is even. */
+
+		/* So that we hold a reference in both cases. */
 		kobject_get(&mk->kobj);
 	}
 
+	return mk;
+}
+
+static void __init kernel_add_sysfs_param(const char *name,
+					  struct kernel_param *kparam,
+					  unsigned int name_skip)
+{
+	struct module_kobject *mk;
+	int err;
+
+	mk = locate_module_kobject(name);
+	if (!mk)
+		return;
+
+	/* We need to remove old parameters before adding more. */
+	if (mk->mp)
+		sysfs_remove_group(&mk->kobj, &mk->mp->grp);
+
 	/* These should not fail at boot. */
 	err = add_sysfs_param(mk, kparam, kparam->name + name_skip);
 	BUG_ON(err);
@@ -796,6 +812,32 @@
 	}
 }
 
+ssize_t __modver_version_show(struct module_attribute *mattr,
+			      struct module *mod, char *buf)
+{
+	struct module_version_attribute *vattr =
+		container_of(mattr, struct module_version_attribute, mattr);
+
+	return sprintf(buf, "%s\n", vattr->version);
+}
+
+extern struct module_version_attribute __start___modver[], __stop___modver[];
+
+static void __init version_sysfs_builtin(void)
+{
+	const struct module_version_attribute *vattr;
+	struct module_kobject *mk;
+	int err;
+
+	for (vattr = __start___modver; vattr < __stop___modver; vattr++) {
+		mk = locate_module_kobject(vattr->module_name);
+		if (mk) {
+			err = sysfs_create_file(&mk->kobj, &vattr->mattr.attr);
+			kobject_uevent(&mk->kobj, KOBJ_ADD);
+			kobject_put(&mk->kobj);
+		}
+	}
+}
 
 /* module-related sysfs stuff */
 
@@ -875,6 +917,7 @@
 	}
 	module_sysfs_initialized = 1;
 
+	version_sysfs_builtin();
 	param_sysfs_builtin();
 
 	return 0;
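The __modver handling above exposes version information for drivers that are built into the kernel rather than loaded as modules. The macro side (MODULE_VERSION() emitting a struct module_version_attribute into the __modver section) lives in include/linux/module.h and is not shown in this hunk, so the sketch below only illustrates the assumed end-to-end effect for a hypothetical built-in driver.

/* Hypothetical driver compiled in with CONFIG_EXAMPLE=y rather than =m. */
#include <linux/module.h>

MODULE_VERSION("1.2.3");
MODULE_LICENSE("GPL");

/*
 * With version_sysfs_builtin() above, this is expected to surface as
 * /sys/module/example/version containing "1.2.3" even though no .ko is
 * ever loaded; previously only loadable modules exposed their version.
 */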
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 84522c7..999835b 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1901,11 +1901,12 @@
 		return;
 
 	raw_spin_lock(&ctx->lock);
-	update_context_time(ctx);
+	if (ctx->is_active)
+		update_context_time(ctx);
 	update_event_times(event);
+	if (event->state == PERF_EVENT_STATE_ACTIVE)
+		event->pmu->read(event);
 	raw_spin_unlock(&ctx->lock);
-
-	event->pmu->read(event);
 }
 
 static inline u64 perf_event_count(struct perf_event *event)
@@ -1999,8 +2000,7 @@
 	 * accessed from NMI. Use a temporary manual per cpu allocation
 	 * until that gets sorted out.
 	 */
-	size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
-		num_possible_cpus();
+	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
 
 	entries = kzalloc(size, GFP_KERNEL);
 	if (!entries)
@@ -2201,13 +2201,6 @@
 	if (!task)
 		return ERR_PTR(-ESRCH);
 
-	/*
-	 * Can't attach events to a dying task.
-	 */
-	err = -ESRCH;
-	if (task->flags & PF_EXITING)
-		goto errout;
-
 	/* Reuse ptrace permission checks for now. */
 	err = -EACCES;
 	if (!ptrace_may_access(task, PTRACE_MODE_READ))
@@ -2268,14 +2261,27 @@
 
 		get_ctx(ctx);
 
-		if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
-			/*
-			 * We raced with some other task; use
-			 * the context they set.
-			 */
+		err = 0;
+		mutex_lock(&task->perf_event_mutex);
+		/*
+		 * If it has already passed perf_event_exit_task(),
+		 * we must see PF_EXITING; it takes this mutex too.
+		 */
+		if (task->flags & PF_EXITING)
+			err = -ESRCH;
+		else if (task->perf_event_ctxp[ctxn])
+			err = -EAGAIN;
+		else
+			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
+		mutex_unlock(&task->perf_event_mutex);
+
+		if (unlikely(err)) {
 			put_task_struct(task);
 			kfree(ctx);
-			goto retry;
+
+			if (err == -EAGAIN)
+				goto retry;
+			goto errout;
 		}
 	}
 
@@ -5374,6 +5380,8 @@
 	goto out;
 }
 
+static struct lock_class_key cpuctx_mutex;
+
 int perf_pmu_register(struct pmu *pmu, char *name, int type)
 {
 	int cpu, ret;
@@ -5422,6 +5430,7 @@
 
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 		__perf_event_init_context(&cpuctx->ctx);
+		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
 		cpuctx->ctx.type = cpu_context;
 		cpuctx->ctx.pmu = pmu;
 		cpuctx->jiffies_interval = 1;
@@ -6127,7 +6136,7 @@
 	 * scheduled, so we are now safe from rescheduling changing
 	 * our context.
 	 */
-	child_ctx = child->perf_event_ctxp[ctxn];
+	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
 	task_ctx_sched_out(child_ctx, EVENT_ALL);
 
 	/*
@@ -6440,11 +6449,6 @@
 	unsigned long flags;
 	int ret = 0;
 
-	child->perf_event_ctxp[ctxn] = NULL;
-
-	mutex_init(&child->perf_event_mutex);
-	INIT_LIST_HEAD(&child->perf_event_list);
-
 	if (likely(!parent->perf_event_ctxp[ctxn]))
 		return 0;
 
@@ -6533,6 +6537,10 @@
 {
 	int ctxn, ret;
 
+	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
+	mutex_init(&child->perf_event_mutex);
+	INIT_LIST_HEAD(&child->perf_event_list);
+
 	for_each_task_context_nr(ctxn) {
 		ret = perf_event_init_context(child, ctxn);
 		if (ret)
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 7b5db6a..7018530 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -326,7 +326,7 @@
 
 static int __init pm_start_workqueue(void)
 {
-	pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0);
+	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
 
 	return pm_wq ? 0 : -ENOMEM;
 }
diff --git a/kernel/power/process.c b/kernel/power/process.c
index d6d2a10..0cf3a27 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -22,7 +22,7 @@
  */
 #define TIMEOUT	(20 * HZ)
 
-static inline int freezeable(struct task_struct * p)
+static inline int freezable(struct task_struct * p)
 {
 	if ((p == current) ||
 	    (p->flags & PF_NOFREEZE) ||
@@ -53,7 +53,7 @@
 		todo = 0;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			if (frozen(p) || !freezeable(p))
+			if (frozen(p) || !freezable(p))
 				continue;
 
 			if (!freeze_task(p, sig_only))
@@ -167,7 +167,7 @@
 
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
-		if (!freezeable(p))
+		if (!freezable(p))
 			continue;
 
 		if (nosig_only && should_send_signal(p))
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 0dac75e..64db648 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1519,11 +1519,8 @@
 swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
 		unsigned int nr_pages, unsigned int nr_highmem)
 {
-	int error = 0;
-
 	if (nr_highmem > 0) {
-		error = get_highmem_buffer(PG_ANY);
-		if (error)
+		if (get_highmem_buffer(PG_ANY))
 			goto err_out;
 		if (nr_highmem > alloc_highmem) {
 			nr_highmem -= alloc_highmem;
@@ -1546,7 +1543,7 @@
 
  err_out:
 	swsusp_free();
-	return error;
+	return -ENOMEM;
 }
 
 asmlinkage int swsusp_save(void)
diff --git a/kernel/printk.c b/kernel/printk.c
index 53d9a9e..3623152 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -97,7 +97,7 @@
 /*
  * logbuf_lock protects log_buf, log_start, log_end, con_start and logged_chars
  * It is also used in interesting ways to provide interlocking in
- * release_console_sem().
+ * console_unlock().
  */
 static DEFINE_SPINLOCK(logbuf_lock);
 
@@ -262,25 +262,47 @@
 int dmesg_restrict;
 #endif
 
+static int syslog_action_restricted(int type)
+{
+	if (dmesg_restrict)
+		return 1;
+	/* Unless restricted, we allow "read all" and "get buffer size" for everybody */
+	return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
+}
+
+static int check_syslog_permissions(int type, bool from_file)
+{
+	/*
+	 * If this is from /proc/kmsg and we've already opened it, then we've
+	 * already done the capabilities checks at open time.
+	 */
+	if (from_file && type != SYSLOG_ACTION_OPEN)
+		return 0;
+
+	if (syslog_action_restricted(type)) {
+		if (capable(CAP_SYSLOG))
+			return 0;
+		/* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
+		if (capable(CAP_SYS_ADMIN)) {
+			WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
+				 "but no CAP_SYSLOG (deprecated).\n");
+			return 0;
+		}
+		return -EPERM;
+	}
+	return 0;
+}
+
 int do_syslog(int type, char __user *buf, int len, bool from_file)
 {
 	unsigned i, j, limit, count;
 	int do_clear = 0;
 	char c;
-	int error = 0;
+	int error;
 
-	/*
-	 * If this is from /proc/kmsg we only do the capabilities checks
-	 * at open time.
-	 */
-	if (type == SYSLOG_ACTION_OPEN || !from_file) {
-		if (dmesg_restrict && !capable(CAP_SYSLOG))
-			goto warn; /* switch to return -EPERM after 2.6.39 */
-		if ((type != SYSLOG_ACTION_READ_ALL &&
-		     type != SYSLOG_ACTION_SIZE_BUFFER) &&
-		    !capable(CAP_SYSLOG))
-			goto warn; /* switch to return -EPERM after 2.6.39 */
-	}
+	error = check_syslog_permissions(type, from_file);
+	if (error)
+		goto out;
 
 	error = security_syslog(type);
 	if (error)
@@ -423,12 +445,6 @@
 	}
 out:
 	return error;
-warn:
-	/* remove after 2.6.39 */
-	if (capable(CAP_SYS_ADMIN))
-		WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
-		  "but no CAP_SYSLOG (deprecated and denied).\n");
-	return -EPERM;
 }
 
 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
@@ -501,7 +517,7 @@
 /*
  * Call the console drivers, asking them to write out
  * log_buf[start] to log_buf[end - 1].
- * The console_sem must be held.
+ * The console_lock must be held.
  */
 static void call_console_drivers(unsigned start, unsigned end)
 {
@@ -604,11 +620,11 @@
  *
  * This is printk().  It can be called from any context.  We want it to work.
  *
- * We try to grab the console_sem.  If we succeed, it's easy - we log the output and
+ * We try to grab the console_lock.  If we succeed, it's easy - we log the output and
  * call the console drivers.  If we fail to get the semaphore we place the output
  * into the log buffer and return.  The current holder of the console_sem will
- * notice the new output in release_console_sem() and will send it to the
- * consoles before releasing the semaphore.
+ * notice the new output in console_unlock() and will send it to the
+ * consoles before releasing the lock.
  *
  * One effect of this deferred printing is that code which calls printk() and
  * then changes console_loglevel may break. This is because console_loglevel
@@ -659,19 +675,19 @@
 /*
  * Try to get console ownership to actually show the kernel
  * messages from a 'printk'. Return true (and with the
- * console_semaphore held, and 'console_locked' set) if it
+ * console_lock held, and 'console_locked' set) if it
  * is successful, false otherwise.
  *
  * This gets called with the 'logbuf_lock' spinlock held and
  * interrupts disabled. It should return with 'lockbuf_lock'
  * released but interrupts still disabled.
  */
-static int acquire_console_semaphore_for_printk(unsigned int cpu)
+static int console_trylock_for_printk(unsigned int cpu)
 	__releases(&logbuf_lock)
 {
 	int retval = 0;
 
-	if (!try_acquire_console_sem()) {
+	if (console_trylock()) {
 		retval = 1;
 
 		/*
@@ -827,12 +843,12 @@
 	 * actual magic (print out buffers, wake up klogd,
 	 * etc). 
 	 *
-	 * The acquire_console_semaphore_for_printk() function
+	 * The console_trylock_for_printk() function
 	 * will release 'logbuf_lock' regardless of whether it
 	 * actually gets the semaphore or not.
 	 */
-	if (acquire_console_semaphore_for_printk(this_cpu))
-		release_console_sem();
+	if (console_trylock_for_printk(this_cpu))
+		console_unlock();
 
 	lockdep_on();
 out_restore_irqs:
@@ -993,7 +1009,7 @@
 	if (!console_suspend_enabled)
 		return;
 	printk("Suspending console(s) (use no_console_suspend to debug)\n");
-	acquire_console_sem();
+	console_lock();
 	console_suspended = 1;
 	up(&console_sem);
 }
@@ -1004,7 +1020,7 @@
 		return;
 	down(&console_sem);
 	console_suspended = 0;
-	release_console_sem();
+	console_unlock();
 }
 
 /**
@@ -1027,21 +1043,21 @@
 	case CPU_DYING:
 	case CPU_DOWN_FAILED:
 	case CPU_UP_CANCELED:
-		acquire_console_sem();
-		release_console_sem();
+		console_lock();
+		console_unlock();
 	}
 	return NOTIFY_OK;
 }
 
 /**
- * acquire_console_sem - lock the console system for exclusive use.
+ * console_lock - lock the console system for exclusive use.
  *
- * Acquires a semaphore which guarantees that the caller has
+ * Acquires a lock which guarantees that the caller has
  * exclusive access to the console system and the console_drivers list.
  *
  * Can sleep, returns nothing.
  */
-void acquire_console_sem(void)
+void console_lock(void)
 {
 	BUG_ON(in_interrupt());
 	down(&console_sem);
@@ -1050,21 +1066,29 @@
 	console_locked = 1;
 	console_may_schedule = 1;
 }
-EXPORT_SYMBOL(acquire_console_sem);
+EXPORT_SYMBOL(console_lock);
 
-int try_acquire_console_sem(void)
+/**
+ * console_trylock - try to lock the console system for exclusive use.
+ *
+ * Tries to acquire a lock which guarantees that the caller has
+ * exclusive access to the console system and the console_drivers list.
+ *
+ * returns 1 on success, and 0 on failure to acquire the lock.
+ */
+int console_trylock(void)
 {
 	if (down_trylock(&console_sem))
-		return -1;
+		return 0;
 	if (console_suspended) {
 		up(&console_sem);
-		return -1;
+		return 0;
 	}
 	console_locked = 1;
 	console_may_schedule = 0;
-	return 0;
+	return 1;
 }
-EXPORT_SYMBOL(try_acquire_console_sem);
+EXPORT_SYMBOL(console_trylock);
 
 int is_console_locked(void)
 {
@@ -1095,20 +1119,20 @@
 }
 
 /**
- * release_console_sem - unlock the console system
+ * console_unlock - unlock the console system
  *
- * Releases the semaphore which the caller holds on the console system
+ * Releases the console_lock which the caller holds on the console system
  * and the console driver list.
  *
- * While the semaphore was held, console output may have been buffered
- * by printk().  If this is the case, release_console_sem() emits
- * the output prior to releasing the semaphore.
+ * While the console_lock was held, console output may have been buffered
+ * by printk().  If this is the case, console_unlock() emits
+ * the output prior to releasing the lock.
  *
  * If there is output waiting for klogd, we wake it up.
  *
- * release_console_sem() may be called from any context.
+ * console_unlock() may be called from any context.
  */
-void release_console_sem(void)
+void console_unlock(void)
 {
 	unsigned long flags;
 	unsigned _con_start, _log_end;
@@ -1141,7 +1165,7 @@
 	if (wake_klogd)
 		wake_up_klogd();
 }
-EXPORT_SYMBOL(release_console_sem);
+EXPORT_SYMBOL(console_unlock);
 
 /**
  * console_conditional_schedule - yield the CPU if required
@@ -1150,7 +1174,7 @@
  * if this CPU should yield the CPU to another task, do
  * so here.
  *
- * Must be called within acquire_console_sem().
+ * Must be called within console_lock().
  */
 void __sched console_conditional_schedule(void)
 {
@@ -1171,14 +1195,14 @@
 		if (down_trylock(&console_sem) != 0)
 			return;
 	} else
-		acquire_console_sem();
+		console_lock();
 
 	console_locked = 1;
 	console_may_schedule = 0;
 	for_each_console(c)
 		if ((c->flags & CON_ENABLED) && c->unblank)
 			c->unblank();
-	release_console_sem();
+	console_unlock();
 }
 
 /*
@@ -1189,7 +1213,7 @@
 	struct console *c;
 	struct tty_driver *driver = NULL;
 
-	acquire_console_sem();
+	console_lock();
 	for_each_console(c) {
 		if (!c->device)
 			continue;
@@ -1197,7 +1221,7 @@
 		if (driver)
 			break;
 	}
-	release_console_sem();
+	console_unlock();
 	return driver;
 }
 
@@ -1208,17 +1232,17 @@
  */
 void console_stop(struct console *console)
 {
-	acquire_console_sem();
+	console_lock();
 	console->flags &= ~CON_ENABLED;
-	release_console_sem();
+	console_unlock();
 }
 EXPORT_SYMBOL(console_stop);
 
 void console_start(struct console *console)
 {
-	acquire_console_sem();
+	console_lock();
 	console->flags |= CON_ENABLED;
-	release_console_sem();
+	console_unlock();
 }
 EXPORT_SYMBOL(console_start);
 
@@ -1340,7 +1364,7 @@
 	 *	Put this console in the list - keep the
 	 *	preferred driver at the head of the list.
 	 */
-	acquire_console_sem();
+	console_lock();
 	if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
 		newcon->next = console_drivers;
 		console_drivers = newcon;
@@ -1352,14 +1376,14 @@
 	}
 	if (newcon->flags & CON_PRINTBUFFER) {
 		/*
-		 * release_console_sem() will print out the buffered messages
+		 * console_unlock() will print out the buffered messages
 		 * for us.
 		 */
 		spin_lock_irqsave(&logbuf_lock, flags);
 		con_start = log_start;
 		spin_unlock_irqrestore(&logbuf_lock, flags);
 	}
-	release_console_sem();
+	console_unlock();
 	console_sysfs_notify();
 
 	/*
@@ -1396,7 +1420,7 @@
 		return braille_unregister_console(console);
 #endif
 
-	acquire_console_sem();
+	console_lock();
 	if (console_drivers == console) {
 		console_drivers=console->next;
 		res = 0;
@@ -1418,7 +1442,7 @@
 	if (console_drivers != NULL && console->flags & CON_CONSDEV)
 		console_drivers->flags |= CON_CONSDEV;
 
-	release_console_sem();
+	console_unlock();
 	console_sysfs_notify();
 	return res;
 }
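The rename above replaces acquire_console_sem()/release_console_sem()/try_acquire_console_sem() with console_lock()/console_unlock()/console_trylock(), and inverts the trylock return convention. A minimal sketch of the new calling pattern, assuming the matching declarations in <linux/console.h> (the callers shown are hypothetical):

#include <linux/console.h>
#include <linux/errno.h>

static void example_touch_consoles(void)
{
	console_lock();		/* formerly acquire_console_sem(); may sleep */
	/* ... exclusive access to the console system and console_drivers ... */
	console_unlock();	/* formerly release_console_sem(); also flushes
				 * any printk() output buffered meanwhile */
}

static int example_try_touch_consoles(void)
{
	/* Unlike try_acquire_console_sem(), which returned 0 on success,
	 * console_trylock() returns 1 on success and 0 on failure. */
	if (!console_trylock())
		return -EBUSY;
	/* ... */
	console_unlock();
	return 0;
}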
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 99bbaa3..1708b1e 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -313,7 +313,7 @@
 		child->exit_code = data;
 		dead = __ptrace_detach(current, child);
 		if (!child->exit_state)
-			wake_up_process(child);
+			wake_up_state(child, TASK_TRACED | TASK_STOPPED);
 	}
 	write_unlock_irq(&tasklist_lock);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index ea3e5ef..18d38e4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -553,9 +553,6 @@
 	/* try_to_wake_up() stats */
 	unsigned int ttwu_count;
 	unsigned int ttwu_local;
-
-	/* BKL stats */
-	unsigned int bkl_count;
 #endif
 };
 
@@ -609,6 +606,9 @@
 	struct task_group *tg;
 	struct cgroup_subsys_state *css;
 
+	if (p->flags & PF_EXITING)
+		return &root_task_group;
+
 	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
 			lockdep_is_held(&task_rq(p)->lock));
 	tg = container_of(css, struct task_group, css);
@@ -3887,7 +3887,7 @@
 	schedstat_inc(this_rq(), sched_count);
 #ifdef CONFIG_SCHEDSTATS
 	if (unlikely(prev->lock_depth >= 0)) {
-		schedstat_inc(this_rq(), bkl_count);
+		schedstat_inc(this_rq(), rq_sched_info.bkl_count);
 		schedstat_inc(prev, sched_info.bkl_count);
 	}
 #endif
@@ -4871,7 +4871,8 @@
 		 * assigned.
 		 */
 		if (rt_bandwidth_enabled() && rt_policy(policy) &&
-				task_group(p)->rt_bandwidth.rt_runtime == 0) {
+				task_group(p)->rt_bandwidth.rt_runtime == 0 &&
+				!task_group_is_autogroup(task_group(p))) {
 			__task_rq_unlock(rq);
 			raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 			return -EPERM;
@@ -8882,6 +8883,20 @@
 	}
 }
 
+static void
+cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task)
+{
+	/*
+	 * cgroup_exit() is called in the copy_process() failure path.
+	 * Ignore this case since the task hasn't run yet; this avoids
+	 * trying to poke a half-freed task state from generic code.
+	 */
+	if (!(task->flags & PF_EXITING))
+		return;
+
+	sched_move_task(task);
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
 				u64 shareval)
@@ -8954,6 +8969,7 @@
 	.destroy	= cpu_cgroup_destroy,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
+	.exit		= cpu_cgroup_exit,
 	.populate	= cpu_cgroup_populate,
 	.subsys_id	= cpu_cgroup_subsys_id,
 	.early_init	= 1,
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
index 32a723b..9fb6562 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched_autogroup.c
@@ -27,6 +27,11 @@
 {
 	struct autogroup *ag = container_of(kref, struct autogroup, kref);
 
+#ifdef CONFIG_RT_GROUP_SCHED
+	/* We've redirected RT tasks to the root task group... */
+	ag->tg->rt_se = NULL;
+	ag->tg->rt_rq = NULL;
+#endif
 	sched_destroy_group(ag->tg);
 }
 
@@ -55,6 +60,10 @@
 	return ag;
 }
 
+#ifdef CONFIG_RT_GROUP_SCHED
+static void free_rt_sched_group(struct task_group *tg);
+#endif
+
 static inline struct autogroup *autogroup_create(void)
 {
 	struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
@@ -72,6 +81,19 @@
 	init_rwsem(&ag->lock);
 	ag->id = atomic_inc_return(&autogroup_seq_nr);
 	ag->tg = tg;
+#ifdef CONFIG_RT_GROUP_SCHED
+	/*
+	 * Autogroup RT tasks are redirected to the root task group
+	 * so we don't have to move tasks around upon policy change,
+	 * or flail around trying to allocate bandwidth on the fly.
+	 * A bandwidth exception in __sched_setscheduler() allows
+	 * the policy change to proceed.  Thereafter, task_group()
+	 * returns &root_task_group, so zero bandwidth is required.
+	 */
+	free_rt_sched_group(tg);
+	tg->rt_se = root_task_group.rt_se;
+	tg->rt_rq = root_task_group.rt_rq;
+#endif
 	tg->autogroup = ag;
 
 	return ag;
@@ -106,6 +128,11 @@
 	return true;
 }
 
+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+	return tg != &root_task_group && tg->autogroup;
+}
+
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
 {
@@ -231,6 +258,11 @@
 #ifdef CONFIG_SCHED_DEBUG
 static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
 {
+	int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
+
+	if (!enabled || !tg->autogroup)
+		return 0;
+
 	return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
 }
 #endif /* CONFIG_SCHED_DEBUG */
diff --git a/kernel/sched_autogroup.h b/kernel/sched_autogroup.h
index 5358e24..7b859ff 100644
--- a/kernel/sched_autogroup.h
+++ b/kernel/sched_autogroup.h
@@ -15,6 +15,10 @@
 
 static inline void autogroup_init(struct task_struct *init_task) {  }
 static inline void autogroup_free(struct task_group *tg) { }
+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+	return false;
+}
 
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 1dfae3d..eb6cb8e 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -16,6 +16,8 @@
 #include <linux/kallsyms.h>
 #include <linux/utsname.h>
 
+static DEFINE_SPINLOCK(sched_debug_lock);
+
 /*
  * This allows printing both to /proc/sched_debug and
  * to the console
@@ -86,6 +88,26 @@
 }
 #endif
 
+#ifdef CONFIG_CGROUP_SCHED
+static char group_path[PATH_MAX];
+
+static char *task_group_path(struct task_group *tg)
+{
+	if (autogroup_path(tg, group_path, PATH_MAX))
+		return group_path;
+
+	/*
+	 * May be NULL if the underlying cgroup isn't fully-created yet
+	 */
+	if (!tg->css.cgroup) {
+		group_path[0] = '\0';
+		return group_path;
+	}
+	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
+	return group_path;
+}
+#endif
+
 static void
 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 {
@@ -108,6 +130,9 @@
 	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
 		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
 #endif
+#ifdef CONFIG_CGROUP_SCHED
+	SEQ_printf(m, " %s", task_group_path(task_group(p)));
+#endif
 
 	SEQ_printf(m, "\n");
 }
@@ -144,7 +169,11 @@
 	struct sched_entity *last;
 	unsigned long flags;
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
+#else
 	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
+#endif
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
 			SPLIT_NS(cfs_rq->exec_clock));
 
@@ -191,7 +220,11 @@
 
 void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 {
+#ifdef CONFIG_RT_GROUP_SCHED
+	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
+#else
 	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
+#endif
 
 #define P(x) \
 	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
@@ -212,6 +245,7 @@
 static void print_cpu(struct seq_file *m, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
 
 #ifdef CONFIG_X86
 	{
@@ -262,14 +296,20 @@
 	P(ttwu_count);
 	P(ttwu_local);
 
-	P(bkl_count);
+	SEQ_printf(m, "  .%-30s: %d\n", "bkl_count",
+				rq->rq_sched_info.bkl_count);
 
 #undef P
+#undef P64
 #endif
+	spin_lock_irqsave(&sched_debug_lock, flags);
 	print_cfs_stats(m, cpu);
 	print_rt_stats(m, cpu);
 
+	rcu_read_lock();
 	print_rq(m, rq, cpu);
+	rcu_read_unlock();
+	spin_unlock_irqrestore(&sched_debug_lock, flags);
 }
 
 static const char *sched_tunable_scaling_names[] = {
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c62ebae..0c26e2d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -699,7 +699,8 @@
 	cfs_rq->nr_running--;
 }
 
-#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_FAIR_GROUP_SCHED
+# ifdef CONFIG_SMP
 static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
 					    int global_update)
 {
@@ -721,10 +722,10 @@
 	u64 now, delta;
 	unsigned long load = cfs_rq->load.weight;
 
-	if (!cfs_rq)
+	if (cfs_rq->tg == &root_task_group)
 		return;
 
-	now = rq_of(cfs_rq)->clock;
+	now = rq_of(cfs_rq)->clock_task;
 	delta = now - cfs_rq->load_stamp;
 
 	/* truncate load history at 4 idle periods */
@@ -762,6 +763,51 @@
 		list_del_leaf_cfs_rq(cfs_rq);
 }
 
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+				long weight_delta)
+{
+	long load_weight, load, shares;
+
+	load = cfs_rq->load.weight + weight_delta;
+
+	load_weight = atomic_read(&tg->load_weight);
+	load_weight -= cfs_rq->load_contribution;
+	load_weight += load;
+
+	shares = (tg->shares * load);
+	if (load_weight)
+		shares /= load_weight;
+
+	if (shares < MIN_SHARES)
+		shares = MIN_SHARES;
+	if (shares > tg->shares)
+		shares = tg->shares;
+
+	return shares;
+}
+
+static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
+		update_cfs_load(cfs_rq, 0);
+		update_cfs_shares(cfs_rq, 0);
+	}
+}
+# else /* CONFIG_SMP */
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
+{
+}
+
+static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg,
+				long weight_delta)
+{
+	return tg->shares;
+}
+
+static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
+{
+}
+# endif /* CONFIG_SMP */
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
@@ -782,41 +828,20 @@
 {
 	struct task_group *tg;
 	struct sched_entity *se;
-	long load_weight, load, shares;
-
-	if (!cfs_rq)
-		return;
+	long shares;
 
 	tg = cfs_rq->tg;
 	se = tg->se[cpu_of(rq_of(cfs_rq))];
 	if (!se)
 		return;
-
-	load = cfs_rq->load.weight + weight_delta;
-
-	load_weight = atomic_read(&tg->load_weight);
-	load_weight -= cfs_rq->load_contribution;
-	load_weight += load;
-
-	shares = (tg->shares * load);
-	if (load_weight)
-		shares /= load_weight;
-
-	if (shares < MIN_SHARES)
-		shares = MIN_SHARES;
-	if (shares > tg->shares)
-		shares = tg->shares;
+#ifndef CONFIG_SMP
+	if (likely(se->load.weight == tg->shares))
+		return;
+#endif
+	shares = calc_cfs_shares(cfs_rq, tg, weight_delta);
 
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
-
-static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
-{
-	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
-		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq, 0);
-	}
-}
 #else /* CONFIG_FAIR_GROUP_SCHED */
 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
@@ -1062,6 +1087,9 @@
 		struct sched_entity *se = __pick_next_entity(cfs_rq);
 		s64 delta = curr->vruntime - se->vruntime;
 
+		if (delta < 0)
+			return;
+
 		if (delta > ideal_runtime)
 			resched_task(rq_of(cfs_rq)->curr);
 	}
@@ -1362,27 +1390,27 @@
 		return wl;
 
 	for_each_sched_entity(se) {
-		long S, rw, s, a, b;
+		long lw, w;
 
-		S = se->my_q->tg->shares;
-		s = se->load.weight;
-		rw = se->my_q->load.weight;
+		tg = se->my_q->tg;
+		w = se->my_q->load.weight;
 
-		a = S*(rw + wl);
-		b = S*rw + s*wg;
+		/* use this cpu's instantaneous contribution */
+		lw = atomic_read(&tg->load_weight);
+		lw -= se->my_q->load_contribution;
+		lw += w + wg;
 
-		wl = s*(a-b);
+		wl += w;
 
-		if (likely(b))
-			wl /= b;
+		if (lw > 0 && wl < lw)
+			wl = (wl * tg->shares) / lw;
+		else
+			wl = tg->shares;
 
-		/*
-		 * Assume the group is already running and will
-		 * thus already be accounted for in the weight.
-		 *
-		 * That is, moving shares between CPUs, does not
-		 * alter the group weight.
-		 */
+		/* zero point is MIN_SHARES */
+		if (wl < MIN_SHARES)
+			wl = MIN_SHARES;
+		wl -= se->load.weight;
 		wg = 0;
 	}
 
@@ -1401,7 +1429,7 @@
 
 static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
 {
-	unsigned long this_load, load;
+	s64 this_load, load;
 	int idx, this_cpu, prev_cpu;
 	unsigned long tl_per_task;
 	struct task_group *tg;
@@ -1440,8 +1468,8 @@
 	 * Otherwise check if either cpus are near enough in load to allow this
 	 * task to be woken on this_cpu.
 	 */
-	if (this_load) {
-		unsigned long this_eff_load, prev_eff_load;
+	if (this_load > 0) {
+		s64 this_eff_load, prev_eff_load;
 
 		this_eff_load = 100;
 		this_eff_load *= power_of(prev_cpu);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c914ec7..ad62677 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -625,7 +625,7 @@
 	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	u64 delta_exec;
 
-	if (!task_has_rt_policy(curr))
+	if (curr->sched_class != &rt_sched_class)
 		return;
 
 	delta_exec = rq->clock_task - curr->se.exec_start;
diff --git a/kernel/smp.c b/kernel/smp.c
index 4ec30e0..9910744 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -194,23 +194,52 @@
 	 */
 	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
 		int refs;
+		void (*func) (void *info);
 
-		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
+		/*
+		 * Since we walk the list without any locks, we might
+		 * see an entry that was completed, removed from the
+		 * list and is in the process of being reused.
+		 *
+		 * We must check that the cpu is in the cpumask before
+		 * checking the refs, and both must be set before
+		 * executing the callback on this cpu.
+		 */
+
+		if (!cpumask_test_cpu(cpu, data->cpumask))
 			continue;
 
+		smp_rmb();
+
+		if (atomic_read(&data->refs) == 0)
+			continue;
+
+		func = data->csd.func;			/* for later warn */
 		data->csd.func(data->csd.info);
 
+		/*
+		 * If the cpu mask is not still set then it enabled interrupts,
+		 * we took another smp interrupt, and executed the function
+		 * twice on this cpu.  In theory that copy decremented refs.
+		 */
+		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
+			WARN(1, "%pS enabled interrupts and double executed\n",
+			     func);
+			continue;
+		}
+
 		refs = atomic_dec_return(&data->refs);
 		WARN_ON(refs < 0);
-		if (!refs) {
-			raw_spin_lock(&call_function.lock);
-			list_del_rcu(&data->csd.list);
-			raw_spin_unlock(&call_function.lock);
-		}
 
 		if (refs)
 			continue;
 
+		WARN_ON(!cpumask_empty(data->cpumask));
+
+		raw_spin_lock(&call_function.lock);
+		list_del_rcu(&data->csd.list);
+		raw_spin_unlock(&call_function.lock);
+
 		csd_unlock(&data->csd);
 	}
 
@@ -430,7 +459,7 @@
 	 * can't happen.
 	 */
 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
-		     && !oops_in_progress);
+		     && !oops_in_progress && !early_boot_irqs_disabled);
 
 	/* So, what's a CPU they want? Ignoring this one. */
 	cpu = cpumask_first_and(mask, cpu_online_mask);
@@ -454,11 +483,21 @@
 
 	data = &__get_cpu_var(cfd_data);
 	csd_lock(&data->csd);
+	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
 
 	data->csd.func = func;
 	data->csd.info = info;
 	cpumask_and(data->cpumask, mask, cpu_online_mask);
 	cpumask_clear_cpu(this_cpu, data->cpumask);
+
+	/*
+	 * To ensure the interrupt handler gets a complete view,
+	 * we order the cpumask and refs writes and order the read
+	 * of them in the interrupt handler.  In addition we may
+	 * only clear our own cpu bit from the mask.
+	 */
+	smp_wmb();
+
 	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
 	raw_spin_lock_irqsave(&call_function.lock, flags);
@@ -533,17 +572,20 @@
 #endif /* USE_GENERIC_SMP_HELPERS */
 
 /*
- * Call a function on all processors
+ * Call a function on all processors.  May be used during early boot while
+ * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
+ * of local_irq_disable/enable().
  */
 int on_each_cpu(void (*func) (void *info), void *info, int wait)
 {
+	unsigned long flags;
 	int ret = 0;
 
 	preempt_disable();
 	ret = smp_call_function(func, info, wait);
-	local_irq_disable();
+	local_irq_save(flags);
 	func(info);
-	local_irq_enable();
+	local_irq_restore(flags);
 	preempt_enable();
 	return ret;
 }
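The on_each_cpu() change above uses local_irq_save()/local_irq_restore() around the local invocation, so the helper no longer unconditionally re-enables interrupts and can also be used while early_boot_irqs_disabled is set. A small, hypothetical usage sketch:

#include <linux/kernel.h>
#include <linux/smp.h>

/* Runs once on every online CPU with interrupts disabled. */
static void example_report_cpu(void *info)
{
	pr_info("%s: hello from CPU %d\n",
		(const char *)info, smp_processor_id());
}

static void example_run_everywhere(void)
{
	/* The final argument asks on_each_cpu() to wait for completion. */
	on_each_cpu(example_report_cpu, (void *)"example", 1);
}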
diff --git a/kernel/sys.c b/kernel/sys.c
index 31b71a2..18da702 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1385,7 +1385,8 @@
 	const struct cred *cred = current_cred(), *tcred;
 
 	tcred = __task_cred(task);
-	if ((cred->uid != tcred->euid ||
+	if (current != task &&
+	    (cred->uid != tcred->euid ||
 	     cred->uid != tcred->suid ||
 	     cred->uid != tcred->uid  ||
 	     cred->gid != tcred->egid ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index bc86bb3..0f1bd83 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -170,7 +170,8 @@
 #endif
 
 #ifdef CONFIG_MAGIC_SYSRQ
-static int __sysrq_enabled; /* Note: sysrq code ises it's own private copy */
+/* Note: sysrq code uses its own private copy */
+static int __sysrq_enabled = SYSRQ_DEFAULT_ENABLE;
 
 static int sysrq_sysctl_handler(ctl_table *table, int write,
 				void __user *buffer, size_t *lenp,
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3e216e0..c55ea24 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -642,8 +642,7 @@
 	}
 	local_irq_enable();
 
-	printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n",
-	       smp_processor_id());
+	printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
 }
 
 /*
@@ -795,8 +794,10 @@
 	}
 
 #ifdef CONFIG_NO_HZ
-	if (tick_nohz_enabled)
+	if (tick_nohz_enabled) {
 		ts->nohz_mode = NOHZ_MODE_HIGHRES;
+		printk(KERN_INFO "Switched to NOHz mode on CPU #%d\n", smp_processor_id());
+	}
 #endif
 }
 #endif /* HIGH_RES_TIMERS */
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 32a19f9..3258455 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -41,7 +41,7 @@
 	char symname[KSYM_NAME_LEN];
 
 	if (lookup_symbol_name((unsigned long)sym, symname) < 0)
-		SEQ_printf(m, "<%p>", sym);
+		SEQ_printf(m, "<%pK>", sym);
 	else
 		SEQ_printf(m, "%s", symname);
 }
@@ -112,7 +112,7 @@
 static void
 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 {
-	SEQ_printf(m, "  .base:       %p\n", base);
+	SEQ_printf(m, "  .base:       %pK\n", base);
 	SEQ_printf(m, "  .index:      %d\n",
 			base->index);
 	SEQ_printf(m, "  .resolution: %Lu nsecs\n",
diff --git a/kernel/timer.c b/kernel/timer.c
index 43ca993..d645992 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -959,7 +959,7 @@
  *
  * Synchronization rules: Callers must prevent restarting of the timer,
  * otherwise this function is meaningless. It must not be called from
- * hardirq contexts. The caller must not hold locks which would prevent
+ * interrupt contexts. The caller must not hold locks which would prevent
  * completion of the timer's handler. The timer's handler must not call
  * add_timer_on(). Upon exit the timer is not queued and the handler is
  * not running on any CPU.
@@ -969,10 +969,12 @@
 int del_timer_sync(struct timer_list *timer)
 {
 #ifdef CONFIG_LOCKDEP
-	local_bh_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	lock_map_acquire(&timer->lockdep_map);
 	lock_map_release(&timer->lockdep_map);
-	local_bh_enable();
+	local_irq_restore(flags);
 #endif
 	/*
 	 * don't use it in hardirq context, because it
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 153562d..d95721f 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -138,6 +138,13 @@
 		     !blk_tracer_enabled))
 		return;
 
+	/*
+	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
+	 * message to the trace.
+	 */
+	if (!(bt->act_mask & BLK_TC_NOTIFY))
+		return;
+
 	local_irq_save(flags);
 	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
 	va_start(args, fmt);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 35fde09..5f499e0 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1284,7 +1284,7 @@
 static void trace_module_add_events(struct module *mod)
 {
 	struct ftrace_module_file_ops *file_ops = NULL;
-	struct ftrace_event_call *call, *start, *end;
+	struct ftrace_event_call **call, **start, **end;
 
 	start = mod->trace_events;
 	end = mod->trace_events + mod->num_trace_events;
@@ -1297,7 +1297,7 @@
 		return;
 
 	for_each_event(call, start, end) {
-		__trace_add_event_call(call, mod,
+		__trace_add_event_call(*call, mod,
 				       &file_ops->id, &file_ops->enable,
 				       &file_ops->filter, &file_ops->format);
 	}
@@ -1367,8 +1367,8 @@
 	.priority = 0,
 };
 
-extern struct ftrace_event_call __start_ftrace_events[];
-extern struct ftrace_event_call __stop_ftrace_events[];
+extern struct ftrace_event_call *__start_ftrace_events[];
+extern struct ftrace_event_call *__stop_ftrace_events[];
 
 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
 
@@ -1384,7 +1384,7 @@
 
 static __init int event_trace_init(void)
 {
-	struct ftrace_event_call *call;
+	struct ftrace_event_call **call;
 	struct dentry *d_tracer;
 	struct dentry *entry;
 	struct dentry *d_events;
@@ -1430,7 +1430,7 @@
 		pr_warning("tracing: Failed to allocate common fields");
 
 	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
-		__trace_add_event_call(call, NULL, &ftrace_event_id_fops,
+		__trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
 				       &ftrace_enable_fops,
 				       &ftrace_event_filter_fops,
 				       &ftrace_event_format_fops);
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 4b74d71..bbeec31 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -161,13 +161,13 @@
 	.fields			= LIST_HEAD_INIT(event_class_ftrace_##call.fields),\
 };									\
 									\
-struct ftrace_event_call __used						\
-__attribute__((__aligned__(4)))						\
-__attribute__((section("_ftrace_events"))) event_##call = {		\
+struct ftrace_event_call __used event_##call = {			\
 	.name			= #call,				\
 	.event.type		= etype,				\
 	.class			= &event_class_ftrace_##call,		\
 	.print_fmt		= print,				\
 };									\
+struct ftrace_event_call __used						\
+__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
 
 #include "trace_entries.h"
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 5cf8c60..92b6e1e 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -453,14 +453,6 @@
  * Stubs:
  */
 
-void early_boot_irqs_off(void)
-{
-}
-
-void early_boot_irqs_on(void)
-{
-}
-
 void trace_softirqs_on(unsigned long ip)
 {
 }
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index b706529..5c9fe08 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -55,20 +55,21 @@
 	.raw_init	= init_syscall_trace,
 };
 
-extern unsigned long __start_syscalls_metadata[];
-extern unsigned long __stop_syscalls_metadata[];
+extern struct syscall_metadata *__start_syscalls_metadata[];
+extern struct syscall_metadata *__stop_syscalls_metadata[];
 
 static struct syscall_metadata **syscalls_metadata;
 
-static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
+static __init struct syscall_metadata *
+find_syscall_meta(unsigned long syscall)
 {
-	struct syscall_metadata *start;
-	struct syscall_metadata *stop;
+	struct syscall_metadata **start;
+	struct syscall_metadata **stop;
 	char str[KSYM_SYMBOL_LEN];
 
 
-	start = (struct syscall_metadata *)__start_syscalls_metadata;
-	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
+	start = __start_syscalls_metadata;
+	stop = __stop_syscalls_metadata;
 	kallsyms_lookup(syscall, NULL, NULL, NULL, str);
 
 	for ( ; start < stop; start++) {
@@ -78,8 +79,8 @@
 		 * with "SyS" instead of "sys", leading to an unwanted
 		 * mismatch.
 		 */
-		if (start->name && !strcmp(start->name + 3, str + 3))
-			return start;
+		if ((*start)->name && !strcmp((*start)->name + 3, str + 3))
+			return *start;
 	}
 	return NULL;
 }
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index e95ee7f..68187af 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -27,8 +27,8 @@
 #include <linux/sched.h>
 #include <linux/jump_label.h>
 
-extern struct tracepoint __start___tracepoints[];
-extern struct tracepoint __stop___tracepoints[];
+extern struct tracepoint * const __start___tracepoints_ptrs[];
+extern struct tracepoint * const __stop___tracepoints_ptrs[];
 
 /* Set to 1 to enable tracepoint debug output */
 static const int tracepoint_debug;
@@ -298,10 +298,10 @@
  *
  * Updates the probe callback corresponding to a range of tracepoints.
  */
-void
-tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
+void tracepoint_update_probe_range(struct tracepoint * const *begin,
+				   struct tracepoint * const *end)
 {
-	struct tracepoint *iter;
+	struct tracepoint * const *iter;
 	struct tracepoint_entry *mark_entry;
 
 	if (!begin)
@@ -309,12 +309,12 @@
 
 	mutex_lock(&tracepoints_mutex);
 	for (iter = begin; iter < end; iter++) {
-		mark_entry = get_tracepoint(iter->name);
+		mark_entry = get_tracepoint((*iter)->name);
 		if (mark_entry) {
-			set_tracepoint(&mark_entry, iter,
+			set_tracepoint(&mark_entry, *iter,
 					!!mark_entry->refcount);
 		} else {
-			disable_tracepoint(iter);
+			disable_tracepoint(*iter);
 		}
 	}
 	mutex_unlock(&tracepoints_mutex);
@@ -326,8 +326,8 @@
 static void tracepoint_update_probes(void)
 {
 	/* Core kernel tracepoints */
-	tracepoint_update_probe_range(__start___tracepoints,
-		__stop___tracepoints);
+	tracepoint_update_probe_range(__start___tracepoints_ptrs,
+		__stop___tracepoints_ptrs);
 	/* tracepoints in modules. */
 	module_update_tracepoints();
 }
@@ -514,8 +514,8 @@
  * Will return the first tracepoint in the range if the input tracepoint is
  * NULL.
  */
-int tracepoint_get_iter_range(struct tracepoint **tracepoint,
-	struct tracepoint *begin, struct tracepoint *end)
+int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
+	struct tracepoint * const *begin, struct tracepoint * const *end)
 {
 	if (!*tracepoint && begin != end) {
 		*tracepoint = begin;
@@ -534,7 +534,8 @@
 	/* Core kernel tracepoints */
 	if (!iter->module) {
 		found = tracepoint_get_iter_range(&iter->tracepoint,
-				__start___tracepoints, __stop___tracepoints);
+				__start___tracepoints_ptrs,
+				__stop___tracepoints_ptrs);
 		if (found)
 			goto end;
 	}
@@ -585,8 +586,8 @@
 	switch (val) {
 	case MODULE_STATE_COMING:
 	case MODULE_STATE_GOING:
-		tracepoint_update_probe_range(mod->tracepoints,
-			mod->tracepoints + mod->num_tracepoints);
+		tracepoint_update_probe_range(mod->tracepoints_ptrs,
+			mod->tracepoints_ptrs + mod->num_tracepoints);
 		break;
 	}
 	return 0;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index d7ebdf4..18bb157 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -27,7 +27,7 @@
 #include <asm/irq_regs.h>
 #include <linux/perf_event.h>
 
-int watchdog_enabled;
+int watchdog_enabled = 1;
 int __read_mostly softlockup_thresh = 60;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -43,9 +43,6 @@
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
-static int no_watchdog;
-
-
 /* boot commands */
 /*
  * Should we panic when a soft-lockup or hard-lockup occurs:
@@ -58,7 +55,7 @@
 	if (!strncmp(str, "panic", 5))
 		hardlockup_panic = 1;
 	else if (!strncmp(str, "0", 1))
-		no_watchdog = 1;
+		watchdog_enabled = 0;
 	return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -77,7 +74,7 @@
 
 static int __init nowatchdog_setup(char *str)
 {
-	no_watchdog = 1;
+	watchdog_enabled = 0;
 	return 1;
 }
 __setup("nowatchdog", nowatchdog_setup);
@@ -85,7 +82,7 @@
 /* deprecated */
 static int __init nosoftlockup_setup(char *str)
 {
-	no_watchdog = 1;
+	watchdog_enabled = 0;
 	return 1;
 }
 __setup("nosoftlockup", nosoftlockup_setup);
@@ -366,8 +363,14 @@
 		goto out_save;
 	}
 
-	printk(KERN_ERR "NMI watchdog disabled for cpu%i: unable to create perf event: %ld\n",
-	       cpu, PTR_ERR(event));
+
+	/* vary the KERN level based on the returned errno */
+	if (PTR_ERR(event) == -EOPNOTSUPP)
+		printk(KERN_INFO "NMI watchdog disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
+	else if (PTR_ERR(event) == -ENOENT)
+		printk(KERN_WARNING "NMI watchdog disabled (cpu%i): hardware events not enabled\n", cpu);
+	else
+		printk(KERN_ERR "NMI watchdog disabled (cpu%i): unable to create perf event: %ld\n", cpu, PTR_ERR(event));
 	return PTR_ERR(event);
 
 	/* success path */
@@ -432,9 +435,6 @@
 		wake_up_process(p);
 	}
 
-	/* if any cpu succeeds, watchdog is considered enabled for the system */
-	watchdog_enabled = 1;
-
 	return 0;
 }
 
@@ -462,12 +462,16 @@
 static void watchdog_enable_all_cpus(void)
 {
 	int cpu;
-	int result = 0;
+
+	watchdog_enabled = 0;
 
 	for_each_online_cpu(cpu)
-		result += watchdog_enable(cpu);
+		if (!watchdog_enable(cpu))
+			/* if any cpu succeeds, watchdog is considered
+			 * enabled for the system */
+			watchdog_enabled = 1;
 
-	if (result)
+	if (!watchdog_enabled)
 		printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
 
 }
@@ -476,9 +480,6 @@
 {
 	int cpu;
 
-	if (no_watchdog)
-		return;
-
 	for_each_online_cpu(cpu)
 		watchdog_disable(cpu);
 
@@ -498,10 +499,12 @@
 {
 	proc_dointvec(table, write, buffer, length, ppos);
 
-	if (watchdog_enabled)
-		watchdog_enable_all_cpus();
-	else
-		watchdog_disable_all_cpus();
+	if (write) {
+		if (watchdog_enabled)
+			watchdog_enable_all_cpus();
+		else
+			watchdog_disable_all_cpus();
+	}
 	return 0;
 }
 
@@ -530,7 +533,8 @@
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		err = watchdog_enable(hotcpu);
+		if (watchdog_enabled)
+			err = watchdog_enable(hotcpu);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
@@ -555,9 +559,6 @@
 	void *cpu = (void *)(long)smp_processor_id();
 	int err;
 
-	if (no_watchdog)
-		return;
-
 	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
 	WARN_ON(notifier_to_errno(err));
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8ee6ec8..ee6578b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -79,7 +79,9 @@
 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
 
-	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
+	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
+						/* call for help after 10ms
+						   (min two ticks) */
 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
 	CREATE_COOLDOWN		= HZ,		/* time to breath after fail */
 	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
@@ -768,7 +770,11 @@
 
 	worker->flags &= ~flags;
 
-	/* if transitioning out of NOT_RUNNING, increment nr_running */
+	/*
+	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
+	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
+	 * of multiple flags, not a single flag.
+	 */
 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 		if (!(worker->flags & WORKER_NOT_RUNNING))
 			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
@@ -1840,7 +1846,7 @@
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
 	f(work);
@@ -2043,6 +2049,15 @@
 				move_linked_works(work, scheduled, &n);
 
 		process_scheduled_works(rescuer);
+
+		/*
+		 * Leave this gcwq.  If keep_working() is %true, notify a
+		 * regular worker; otherwise, we end up with 0 concurrency
+		 * and stalling the execution.
+		 */
+		if (keep_working(gcwq))
+			wake_up_worker(gcwq);
+
 		spin_unlock_irq(&gcwq->lock);
 	}
 
@@ -2384,8 +2399,18 @@
 	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	/*
+	 * If @max_active is 1 or rescuer is in use, flushing another work
+	 * item on the same workqueue may lead to deadlock.  Make sure the
+	 * flusher is not running on the same workqueue by verifying write
+	 * access.
+	 */
+	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
+		lock_map_acquire(&cwq->wq->lockdep_map);
+	else
+		lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
+
 	return true;
 already_gone:
 	spin_unlock_irq(&gcwq->lock);
@@ -2942,7 +2967,7 @@
 	 */
 	spin_lock(&workqueue_lock);
 
-	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+	if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
 		for_each_cwq_cpu(cpu, wq)
 			get_cwq(cpu, wq)->max_active = 0;
 
@@ -3054,7 +3079,7 @@
 
 		spin_lock_irq(&gcwq->lock);
 
-		if (!(wq->flags & WQ_FREEZEABLE) ||
+		if (!(wq->flags & WQ_FREEZABLE) ||
 		    !(gcwq->flags & GCWQ_FREEZING))
 			get_cwq(gcwq->cpu, wq)->max_active = max_active;
 
@@ -3304,7 +3329,7 @@
 	 * want to get it over with ASAP - spam rescuers, wake up as
 	 * many idlers as necessary and create new ones till the
 	 * worklist is empty.  Note that if the gcwq is frozen, there
-	 * may be frozen works in freezeable cwqs.  Don't declare
+	 * may be frozen works in freezable cwqs.  Don't declare
 	 * completion while frozen.
 	 */
 	while (gcwq->nr_workers != gcwq->nr_idle ||
@@ -3562,9 +3587,9 @@
 /**
  * freeze_workqueues_begin - begin freezing workqueues
  *
- * Start freezing workqueues.  After this function returns, all
- * freezeable workqueues will queue new works to their frozen_works
- * list instead of gcwq->worklist.
+ * Start freezing workqueues.  After this function returns, all freezable
+ * workqueues will queue new works to their frozen_works list instead of
+ * gcwq->worklist.
  *
  * CONTEXT:
  * Grabs and releases workqueue_lock and gcwq->lock's.
@@ -3590,7 +3615,7 @@
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (cwq && wq->flags & WQ_FREEZEABLE)
+			if (cwq && wq->flags & WQ_FREEZABLE)
 				cwq->max_active = 0;
 		}
 
@@ -3601,7 +3626,7 @@
 }
 
 /**
- * freeze_workqueues_busy - are freezeable workqueues still busy?
+ * freeze_workqueues_busy - are freezable workqueues still busy?
  *
  * Check whether freezing is complete.  This function must be called
  * between freeze_workqueues_begin() and thaw_workqueues().
@@ -3610,8 +3635,8 @@
  * Grabs and releases workqueue_lock.
  *
  * RETURNS:
- * %true if some freezeable workqueues are still busy.  %false if
- * freezing is complete.
+ * %true if some freezable workqueues are still busy.  %false if freezing
+ * is complete.
  */
 bool freeze_workqueues_busy(void)
 {
@@ -3631,7 +3656,7 @@
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;
 
 			BUG_ON(cwq->nr_active < 0);
@@ -3676,7 +3701,7 @@
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;
 
 			/* restore max_active and repopulate worklist */
diff --git a/lib/Kconfig b/lib/Kconfig
index 0ee67e0..8334342 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -201,6 +201,10 @@
        bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
        depends on EXPERIMENTAL && BROKEN
 
+config CPU_RMAP
+	bool
+	depends on SMP
+
 #
 # Netlink attribute parsing support is select'ed if needed
 #
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2d05adb..2b97418 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -657,7 +657,7 @@
 	  Disable for production systems.
 
 config DEBUG_BUGVERBOSE
-	bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
+	bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
 	depends on BUG
 	depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
 		   FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300
@@ -729,8 +729,8 @@
 	  If unsure, say N.
 
 config DEBUG_MEMORY_INIT
-	bool "Debug memory initialisation" if EMBEDDED
-	default !EMBEDDED
+	bool "Debug memory initialisation" if EXPERT
+	default !EXPERT
 	help
 	  Enable this for additional checks during memory initialisation.
 	  The sanity checks verify aspects of the VM such as the memory model
@@ -805,7 +805,7 @@
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && \
-		(CRIS || M68K || M68KNOMMU || FRV || UML || \
+		(CRIS || M68K || FRV || UML || \
 		 AVR32 || SUPERH || BLACKFIN || MN10300) || \
 		ARCH_WANT_FRAME_POINTERS
 	default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
diff --git a/lib/Makefile b/lib/Makefile
index cbb774f..b73ba01 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -110,6 +110,8 @@
 
 obj-$(CONFIG_AVERAGE) += average.o
 
+obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
+
 hostprogs-y	:= gen_crc32table
 clean-files	:= crc32table.h
 
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
new file mode 100644
index 0000000..987acfa
--- /dev/null
+++ b/lib/cpu_rmap.c
@@ -0,0 +1,269 @@
+/*
+ * cpu_rmap.c: CPU affinity reverse-map support
+ * Copyright 2011 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/cpu_rmap.h>
+#ifdef CONFIG_GENERIC_HARDIRQS
+#include <linux/interrupt.h>
+#endif
+#include <linux/module.h>
+
+/*
+ * These functions maintain a mapping from CPUs to some ordered set of
+ * objects with CPU affinities.  This can be seen as a reverse-map of
+ * CPU affinity.  However, we do not assume that the object affinities
+ * cover all CPUs in the system.  For those CPUs not directly covered
+ * by object affinities, we attempt to find a nearest object based on
+ * CPU topology.
+ */
+
+/**
+ * alloc_cpu_rmap - allocate CPU affinity reverse-map
+ * @size: Number of objects to be mapped
+ * @flags: Allocation flags e.g. %GFP_KERNEL
+ */
+struct cpu_rmap *alloc_cpu_rmap(unsigned int size, gfp_t flags)
+{
+	struct cpu_rmap *rmap;
+	unsigned int cpu;
+	size_t obj_offset;
+
+	/* This is a silly number of objects, and we use u16 indices. */
+	if (size > 0xffff)
+		return NULL;
+
+	/* Offset of object pointer array from base structure */
+	obj_offset = ALIGN(offsetof(struct cpu_rmap, near[nr_cpu_ids]),
+			   sizeof(void *));
+
+	rmap = kzalloc(obj_offset + size * sizeof(rmap->obj[0]), flags);
+	if (!rmap)
+		return NULL;
+
+	rmap->obj = (void **)((char *)rmap + obj_offset);
+
+	/* Initially assign CPUs to objects on a rota, since we have
+	 * no idea where the objects are.  Use infinite distance, so
+	 * any object with known distance is preferable.  Include the
+	 * CPUs that are not present/online, since we definitely want
+	 * any newly-hotplugged CPUs to have some object assigned.
+	 */
+	for_each_possible_cpu(cpu) {
+		rmap->near[cpu].index = cpu % size;
+		rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+	}
+
+	rmap->size = size;
+	return rmap;
+}
+EXPORT_SYMBOL(alloc_cpu_rmap);
+
+/* Reevaluate nearest object for given CPU, comparing with the given
+ * neighbours at the given distance.
+ */
+static bool cpu_rmap_copy_neigh(struct cpu_rmap *rmap, unsigned int cpu,
+				const struct cpumask *mask, u16 dist)
+{
+	int neigh;
+
+	for_each_cpu(neigh, mask) {
+		if (rmap->near[cpu].dist > dist &&
+		    rmap->near[neigh].dist <= dist) {
+			rmap->near[cpu].index = rmap->near[neigh].index;
+			rmap->near[cpu].dist = dist;
+			return true;
+		}
+	}
+	return false;
+}
+
+#ifdef DEBUG
+static void debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+	unsigned index;
+	unsigned int cpu;
+
+	pr_info("cpu_rmap %p, %s:\n", rmap, prefix);
+
+	for_each_possible_cpu(cpu) {
+		index = rmap->near[cpu].index;
+		pr_info("cpu %d -> obj %u (distance %u)\n",
+			cpu, index, rmap->near[cpu].dist);
+	}
+}
+#else
+static inline void
+debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
+{
+}
+#endif
+
+/**
+ * cpu_rmap_add - add object to a rmap
+ * @rmap: CPU rmap allocated with alloc_cpu_rmap()
+ * @obj: Object to add to rmap
+ *
+ * Return index of object.
+ */
+int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
+{
+	u16 index;
+
+	BUG_ON(rmap->used >= rmap->size);
+	index = rmap->used++;
+	rmap->obj[index] = obj;
+	return index;
+}
+EXPORT_SYMBOL(cpu_rmap_add);
+
+/**
+ * cpu_rmap_update - update CPU rmap following a change of object affinity
+ * @rmap: CPU rmap to update
+ * @index: Index of object whose affinity changed
+ * @affinity: New CPU affinity of object
+ */
+int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
+		    const struct cpumask *affinity)
+{
+	cpumask_var_t update_mask;
+	unsigned int cpu;
+
+	if (unlikely(!zalloc_cpumask_var(&update_mask, GFP_KERNEL)))
+		return -ENOMEM;
+
+	/* Invalidate distance for all CPUs for which this used to be
+	 * the nearest object.  Mark those CPUs for update.
+	 */
+	for_each_online_cpu(cpu) {
+		if (rmap->near[cpu].index == index) {
+			rmap->near[cpu].dist = CPU_RMAP_DIST_INF;
+			cpumask_set_cpu(cpu, update_mask);
+		}
+	}
+
+	debug_print_rmap(rmap, "after invalidating old distances");
+
+	/* Set distance to 0 for all CPUs in the new affinity mask.
+	 * Mark all CPUs within their NUMA nodes for update.
+	 */
+	for_each_cpu(cpu, affinity) {
+		rmap->near[cpu].index = index;
+		rmap->near[cpu].dist = 0;
+		cpumask_or(update_mask, update_mask,
+			   cpumask_of_node(cpu_to_node(cpu)));
+	}
+
+	debug_print_rmap(rmap, "after updating neighbours");
+
+	/* Update distances based on topology */
+	for_each_cpu(cpu, update_mask) {
+		if (cpu_rmap_copy_neigh(rmap, cpu,
+					topology_thread_cpumask(cpu), 1))
+			continue;
+		if (cpu_rmap_copy_neigh(rmap, cpu,
+					topology_core_cpumask(cpu), 2))
+			continue;
+		if (cpu_rmap_copy_neigh(rmap, cpu,
+					cpumask_of_node(cpu_to_node(cpu)), 3))
+			continue;
+		/* We could continue into NUMA node distances, but for now
+		 * we give up.
+		 */
+	}
+
+	debug_print_rmap(rmap, "after copying neighbours");
+
+	free_cpumask_var(update_mask);
+	return 0;
+}
+EXPORT_SYMBOL(cpu_rmap_update);
+
+#ifdef CONFIG_GENERIC_HARDIRQS
+
+/* Glue between IRQ affinity notifiers and CPU rmaps */
+
+struct irq_glue {
+	struct irq_affinity_notify notify;
+	struct cpu_rmap *rmap;
+	u16 index;
+};
+
+/**
+ * free_irq_cpu_rmap - free a CPU affinity reverse-map used for IRQs
+ * @rmap: Reverse-map allocated with alloc_irq_cpu_rmap(), or %NULL
+ *
+ * Must be called in process context, before freeing the IRQs, and
+ * without holding any locks required by global workqueue items.
+ */
+void free_irq_cpu_rmap(struct cpu_rmap *rmap)
+{
+	struct irq_glue *glue;
+	u16 index;
+
+	if (!rmap)
+		return;
+
+	for (index = 0; index < rmap->used; index++) {
+		glue = rmap->obj[index];
+		irq_set_affinity_notifier(glue->notify.irq, NULL);
+	}
+	irq_run_affinity_notifiers();
+
+	kfree(rmap);
+}
+EXPORT_SYMBOL(free_irq_cpu_rmap);
+
+static void
+irq_cpu_rmap_notify(struct irq_affinity_notify *notify, const cpumask_t *mask)
+{
+	struct irq_glue *glue =
+		container_of(notify, struct irq_glue, notify);
+	int rc;
+
+	rc = cpu_rmap_update(glue->rmap, glue->index, mask);
+	if (rc)
+		pr_warning("irq_cpu_rmap_notify: update failed: %d\n", rc);
+}
+
+static void irq_cpu_rmap_release(struct kref *ref)
+{
+	struct irq_glue *glue =
+		container_of(ref, struct irq_glue, notify.kref);
+	kfree(glue);
+}
+
+/**
+ * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map
+ * @rmap: The reverse-map
+ * @irq: The IRQ number
+ *
+ * This adds an IRQ affinity notifier that will update the reverse-map
+ * automatically.
+ *
+ * Must be called in process context, after the IRQ is allocated but
+ * before it is bound with request_irq().
+ */
+int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
+{
+	struct irq_glue *glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+	int rc;
+
+	if (!glue)
+		return -ENOMEM;
+	glue->notify.notify = irq_cpu_rmap_notify;
+	glue->notify.release = irq_cpu_rmap_release;
+	glue->rmap = rmap;
+	glue->index = cpu_rmap_add(rmap, glue);
+	rc = irq_set_affinity_notifier(irq, &glue->notify);
+	if (rc)
+		kfree(glue);
+	return rc;
+}
+EXPORT_SYMBOL(irq_cpu_rmap_add);
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
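
To make the intent of the new library concrete, here is a compact userspace sketch of the reverse-map idea: CPUs are assigned objects round-robin at infinite distance, and when an object's affinity changes, nearby CPUs inherit it at a larger distance. The kernel's thread/core/node topology walk is collapsed into one hypothetical node level, and all names here are illustrative:

#include <stdio.h>

#define NR_CPUS		8
#define DIST_INF	0xffff

struct rmap_entry {
	unsigned int index;	/* nearest object for this CPU */
	unsigned int dist;	/* how far away that object is */
};

static struct rmap_entry near[NR_CPUS];

/* Round-robin initial assignment, as in alloc_cpu_rmap(): every CPU gets
 * some object at "infinite" distance, so real affinity data wins later. */
static void rmap_init(unsigned int nr_objs)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
		near[cpu].index = cpu % nr_objs;
		near[cpu].dist = DIST_INF;
	}
}

/* Object `index` is now affine to CPU `cpu`: distance 0 there, and CPUs
 * on the same (hypothetical) node inherit it at distance 1 unless they
 * already have something closer. */
static void rmap_update(unsigned int index, unsigned int cpu,
			const unsigned int node_of[NR_CPUS])
{
	near[cpu].index = index;
	near[cpu].dist = 0;

	for (unsigned int other = 0; other < NR_CPUS; other++) {
		if (other == cpu || node_of[other] != node_of[cpu])
			continue;
		if (near[other].dist > 1) {
			near[other].index = index;
			near[other].dist = 1;
		}
	}
}

int main(void)
{
	const unsigned int node_of[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

	rmap_init(2);			/* two objects, e.g. two IRQs   */
	rmap_update(0, 1, node_of);	/* object 0 now affine to CPU 1 */
	rmap_update(1, 5, node_of);	/* object 1 now affine to CPU 5 */

	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %u -> obj %u (dist %u)\n",
		       cpu, near[cpu].index, near[cpu].dist);
	return 0;
}
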
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 344c710..b8029a5 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -35,6 +35,31 @@
 }
 EXPORT_SYMBOL(__list_add);
 
+void __list_del_entry(struct list_head *entry)
+{
+	struct list_head *prev, *next;
+
+	prev = entry->prev;
+	next = entry->next;
+
+	if (WARN(next == LIST_POISON1,
+		"list_del corruption, %p->next is LIST_POISON1 (%p)\n",
+		entry, LIST_POISON1) ||
+	    WARN(prev == LIST_POISON2,
+		"list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
+		entry, LIST_POISON2) ||
+	    WARN(prev->next != entry,
+		"list_del corruption. prev->next should be %p, "
+		"but was %p\n", entry, prev->next) ||
+	    WARN(next->prev != entry,
+		"list_del corruption. next->prev should be %p, "
+		"but was %p\n", entry, next->prev))
+		return;
+
+	__list_del(prev, next);
+}
+EXPORT_SYMBOL(__list_del_entry);
+
 /**
  * list_del - deletes entry from list.
  * @entry: the element to delete from the list.
@@ -43,19 +68,7 @@
  */
 void list_del(struct list_head *entry)
 {
-	WARN(entry->next == LIST_POISON1,
-		"list_del corruption, next is LIST_POISON1 (%p)\n",
-		LIST_POISON1);
-	WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2,
-		"list_del corruption, prev is LIST_POISON2 (%p)\n",
-		LIST_POISON2);
-	WARN(entry->prev->next != entry,
-		"list_del corruption. prev->next should be %p, "
-		"but was %p\n", entry, entry->prev->next);
-	WARN(entry->next->prev != entry,
-		"list_del corruption. next->prev should be %p, "
-		"but was %p\n", entry, entry->next->prev);
-	__list_del(entry->prev, entry->next);
+	__list_del_entry(entry);
 	entry->next = LIST_POISON1;
 	entry->prev = LIST_POISON2;
 }
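
The new __list_del_entry() centralizes the corruption checks and, unlike the old list_del(), refuses to unlink once corruption is detected. A standalone userspace rendering of the same checks, using the classic poison values:

#include <stdbool.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_POISON1	((struct list_head *)0x00100100UL)
#define LIST_POISON2	((struct list_head *)0x00200200UL)

/* Mirror of the checks added above: refuse to unlink if the entry was
 * already deleted (poisoned) or if its neighbours no longer point at it. */
static bool list_del_entry_valid(const struct list_head *entry)
{
	if (entry->next == LIST_POISON1) {
		fprintf(stderr, "list_del corruption: next is LIST_POISON1\n");
		return false;
	}
	if (entry->prev == LIST_POISON2) {
		fprintf(stderr, "list_del corruption: prev is LIST_POISON2\n");
		return false;
	}
	if (entry->prev->next != entry || entry->next->prev != entry) {
		fprintf(stderr, "list_del corruption: neighbours disagree\n");
		return false;
	}
	return true;
}

int main(void)
{
	struct list_head a, b, c;

	/* Build a circular list a <-> b <-> c by hand. */
	a.next = &b; b.prev = &a;
	b.next = &c; c.prev = &b;
	c.next = &a; a.prev = &c;

	printf("b valid before delete: %d\n", list_del_entry_valid(&b));

	/* Unlink b and poison it, as list_del() does. */
	a.next = &c; c.prev = &a;
	b.next = LIST_POISON1;
	b.prev = LIST_POISON2;

	printf("b valid after delete:  %d\n", list_del_entry_valid(&b));
	return 0;
}
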
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 5086bb9..7ea2e03 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -736,10 +736,11 @@
 		}
 	}
 	/*
-	 * The iftag must have been set somewhere because otherwise
-	 * we would return immediated at the beginning of the function
+	 * Only set the root tag if at least one slot tag was actually set
+	 * with settag within the range from *first_indexp to last_index.
 	 */
-	root_tag_set(root, settag);
+	if (tagged > 0)
+		root_tag_set(root, settag);
 	*first_indexp = index;
 
 	return tagged;
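
The fix above stops the range-tagging loop from setting the root tag when nothing in the requested range was actually tagged. A toy userspace model of the invariant, i.e. the root tag is only an OR-summary of the slot tags (everything here is invented, not the radix-tree API):

#include <stdbool.h>
#include <stdio.h>

#define NR_SLOTS 8

static bool slot_tag[NR_SLOTS];
static bool root_tag;

/* Tag every occupied slot in [first, last]; only raise the summary (root)
 * tag if at least one slot tag was actually set. */
static unsigned int tag_range(const bool present[NR_SLOTS],
			      unsigned int first, unsigned int last)
{
	unsigned int tagged = 0;

	for (unsigned int i = first; i <= last && i < NR_SLOTS; i++) {
		if (present[i]) {
			slot_tag[i] = true;
			tagged++;
		}
	}
	if (tagged > 0)		/* the fix above: no stale root tag */
		root_tag = true;
	return tagged;
}

int main(void)
{
	bool present[NR_SLOTS] = { false };
	unsigned int n;

	n = tag_range(present, 2, 5);	/* empty range: root tag stays clear */
	printf("tagged=%u root=%d\n", n, root_tag);

	present[3] = true;
	n = tag_range(present, 2, 5);	/* one hit: root tag is raised */
	printf("tagged=%u root=%d\n", n, root_tag);
	return 0;
}
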
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 4693f79..a16be19 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -315,6 +315,7 @@
 
 	rb_augment_path(node, func, data);
 }
+EXPORT_SYMBOL(rb_augment_insert);
 
 /*
  * before removing the node, find the deepest node on the rebalance path
@@ -340,6 +341,7 @@
 
 	return deepest;
 }
+EXPORT_SYMBOL(rb_augment_erase_begin);
 
 /*
  * after removal, update the tree to account for the removed entry
@@ -350,6 +352,7 @@
 	if (node)
 		rb_augment_path(node, func, data);
 }
+EXPORT_SYMBOL(rb_augment_erase_end);
 
 /*
  * This function returns the first node (in sort order) of the tree.
diff --git a/lib/textsearch.c b/lib/textsearch.c
index d608331..e0cc014 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -13,7 +13,7 @@
  *
  * INTRODUCTION
  *
- *   The textsearch infrastructure provides text searching facitilies for
+ *   The textsearch infrastructure provides text searching facilities for
  *   both linear and non-linear data. Individual search algorithms are
  *   implemented in modules and chosen by the user.
  *
@@ -43,7 +43,7 @@
  *       to the algorithm to store persistent variables.
  *   (4) Core eventually resets the search offset and forwards the find()
  *       request to the algorithm.
- *   (5) Algorithm calls get_next_block() provided by the user continously
+ *   (5) Algorithm calls get_next_block() provided by the user continuously
  *       to fetch the data to be searched in block by block.
  *   (6) Algorithm invokes finish() after the last call to get_next_block
  *       to clean up any leftovers from get_next_block. (Optional)
@@ -58,15 +58,15 @@
  *   the pattern to look for and flags. As a flag, you can set TS_IGNORECASE
  *   to perform case insensitive matching. But it might slow down
  *   performance of algorithm, so you should use it at own your risk.
- *   The returned configuration may then be used for an arbitary
+ *   The returned configuration may then be used for an arbitrary
  *   amount of times and even in parallel as long as a separate struct
  *   ts_state variable is provided to every instance.
  *
  *   The actual search is performed by either calling textsearch_find_-
  *   continuous() for linear data or by providing an own get_next_block()
  *   implementation and calling textsearch_find(). Both functions return
- *   the position of the first occurrence of the patern or UINT_MAX if
- *   no match was found. Subsequent occurences can be found by calling
+ *   the position of the first occurrence of the pattern or UINT_MAX if
+ *   no match was found. Subsequent occurrences can be found by calling
  *   textsearch_next() regardless of the linearity of the data.
  *
  *   Once you're done using a configuration it must be given back via
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index e3b6e18..60a6088 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -7,37 +7,37 @@
 	  CRC32 is supported. See Documentation/xz.txt for more information.
 
 config XZ_DEC_X86
-	bool "x86 BCJ filter decoder" if EMBEDDED
+	bool "x86 BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_POWERPC
-	bool "PowerPC BCJ filter decoder" if EMBEDDED
+	bool "PowerPC BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_IA64
-	bool "IA-64 BCJ filter decoder" if EMBEDDED
+	bool "IA-64 BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_ARM
-	bool "ARM BCJ filter decoder" if EMBEDDED
+	bool "ARM BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_ARMTHUMB
-	bool "ARM-Thumb BCJ filter decoder" if EMBEDDED
+	bool "ARM-Thumb BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_SPARC
-	bool "SPARC BCJ filter decoder" if EMBEDDED
+	bool "SPARC BCJ filter decoder" if EXPERT
 	default y
 	depends on XZ_DEC
 	select XZ_DEC_BCJ
diff --git a/mm/Kconfig b/mm/Kconfig
index 3ad483b..e9c0c61 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -179,7 +179,7 @@
 config COMPACTION
 	bool "Allow for memory compaction"
 	select MIGRATION
-	depends on EXPERIMENTAL && HUGETLB_PAGE && MMU
+	depends on MMU
 	help
 	  Allows the compaction of memory for the allocation of huge pages.
 
diff --git a/mm/compaction.c b/mm/compaction.c
index 6d592a0..8be430b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -406,6 +406,10 @@
 	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
 		return COMPACT_CONTINUE;
 
+	/*
+	 * order == -1 is expected when compacting via
+	 * /proc/sys/vm/compact_memory
+	 */
 	if (cc->order == -1)
 		return COMPACT_CONTINUE;
 
@@ -454,6 +458,13 @@
 		return COMPACT_SKIPPED;
 
 	/*
+	 * order == -1 is expected when compacting via
+	 * /proc/sys/vm/compact_memory
+	 */
+	if (order == -1)
+		return COMPACT_CONTINUE;
+
+	/*
 	 * fragmentation index determines if allocation failures are due to
 	 * low memory or external fragmentation
 	 *
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 004c9c2..3e29781 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1162,7 +1162,12 @@
 		/* after clearing PageTail the gup refcount can be released */
 		smp_mb();
 
-		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+		/*
+		 * Retain the hwpoison flag of the poisoned tail page so that
+		 * memory-failure handling does not end up killing the wrong
+		 * process on a guest machine (KVM).
+		 */
+		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
 		page_tail->flags |= (page->flags &
 				     ((1L << PG_referenced) |
 				      (1L << PG_swapbacked) |
@@ -1203,6 +1208,8 @@
 		BUG_ON(!PageDirty(page_tail));
 		BUG_ON(!PageSwapBacked(page_tail));
 
+		mem_cgroup_split_huge_fixup(page, page_tail);
+
 		lru_add_page_tail(zone, page, page_tail);
 	}
 
@@ -1804,6 +1811,8 @@
 	/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
 	if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
 		goto out;
+	if (is_vma_temporary_stack(vma))
+		goto out;
 	VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
 
 	pgd = pgd_offset(mm, address);
@@ -1837,15 +1846,14 @@
 	spin_lock(ptl);
 	isolated = __collapse_huge_page_isolate(vma, address, pte);
 	spin_unlock(ptl);
-	pte_unmap(pte);
 
 	if (unlikely(!isolated)) {
+		pte_unmap(pte);
 		spin_lock(&mm->page_table_lock);
 		BUG_ON(!pmd_none(*pmd));
 		set_pmd_at(mm, address, pmd, _pmd);
 		spin_unlock(&mm->page_table_lock);
 		anon_vma_unlock(vma->anon_vma);
-		mem_cgroup_uncharge_page(new_page);
 		goto out;
 	}
 
@@ -1856,6 +1864,7 @@
 	anon_vma_unlock(vma->anon_vma);
 
 	__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
+	pte_unmap(pte);
 	__SetPageUptodate(new_page);
 	pgtable = pmd_pgtable(_pmd);
 	VM_BUG_ON(page_count(pgtable) != 1);
@@ -1890,6 +1899,7 @@
 	return;
 
 out:
+	mem_cgroup_uncharge_page(new_page);
 #ifdef CONFIG_NUMA
 	put_page(new_page);
 #endif
@@ -2024,32 +2034,27 @@
 		if ((!(vma->vm_flags & VM_HUGEPAGE) &&
 		     !khugepaged_always()) ||
 		    (vma->vm_flags & VM_NOHUGEPAGE)) {
+		skip:
 			progress++;
 			continue;
 		}
-
 		/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
-		if (!vma->anon_vma || vma->vm_ops || vma->vm_file) {
-			khugepaged_scan.address = vma->vm_end;
-			progress++;
-			continue;
-		}
+		if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
+			goto skip;
+		if (is_vma_temporary_stack(vma))
+			goto skip;
+
 		VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
 
 		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 		hend = vma->vm_end & HPAGE_PMD_MASK;
-		if (hstart >= hend) {
-			progress++;
-			continue;
-		}
+		if (hstart >= hend)
+			goto skip;
+		if (khugepaged_scan.address > hend)
+			goto skip;
 		if (khugepaged_scan.address < hstart)
 			khugepaged_scan.address = hstart;
-		if (khugepaged_scan.address > hend) {
-			khugepaged_scan.address = hend + HPAGE_PMD_SIZE;
-			progress++;
-			continue;
-		}
-		BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
+		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
 
 		while (khugepaged_scan.address < hend) {
 			int ret;
@@ -2078,7 +2083,7 @@
 breakouterloop_mmap_sem:
 
 	spin_lock(&khugepaged_mm_lock);
-	BUG_ON(khugepaged_scan.mm_slot != mm_slot);
+	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
 	/*
 	 * Release the current mm_slot if this mm is about to die, or
 	 * if we scanned all vmas of this mm.
@@ -2233,9 +2238,9 @@
 
 	for (;;) {
 		mutex_unlock(&khugepaged_mutex);
-		BUG_ON(khugepaged_thread != current);
+		VM_BUG_ON(khugepaged_thread != current);
 		khugepaged_loop();
-		BUG_ON(khugepaged_thread != current);
+		VM_BUG_ON(khugepaged_thread != current);
 
 		mutex_lock(&khugepaged_mutex);
 		if (!khugepaged_enabled())
diff --git a/mm/kmemleak-test.c b/mm/kmemleak-test.c
index 177a516..ff0d977 100644
--- a/mm/kmemleak-test.c
+++ b/mm/kmemleak-test.c
@@ -75,13 +75,11 @@
 	 * after the module is removed.
 	 */
 	for (i = 0; i < 10; i++) {
-		elem = kmalloc(sizeof(*elem), GFP_KERNEL);
-		pr_info("kmemleak: kmalloc(sizeof(*elem)) = %p\n", elem);
+		elem = kzalloc(sizeof(*elem), GFP_KERNEL);
+		pr_info("kmemleak: kzalloc(sizeof(*elem)) = %p\n", elem);
 		if (!elem)
 			return -ENOMEM;
-		memset(elem, 0, sizeof(*elem));
 		INIT_LIST_HEAD(&elem->list);
-
 		list_add_tail(&elem->list, &test_list);
 	}
 
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index bd9bc21..84225f3 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -113,7 +113,9 @@
 #define BYTES_PER_POINTER	sizeof(void *)
 
 /* GFP bitmask for kmemleak internal allocations */
-#define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)
+#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
+				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
+				 __GFP_NOWARN)
 
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
@@ -511,9 +513,10 @@
 	struct kmemleak_object *object;
 	struct prio_tree_node *node;
 
-	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
+	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
 	if (!object) {
-		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
+		pr_warning("Cannot allocate a kmemleak_object structure\n");
+		kmemleak_disable();
 		return NULL;
 	}
 
@@ -734,9 +737,9 @@
 		return;
 	}
 
-	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
+	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
 	if (!area) {
-		kmemleak_warn("Cannot allocate a scan area\n");
+		pr_warning("Cannot allocate a scan area\n");
 		goto out;
 	}
 
diff --git a/mm/memblock.c b/mm/memblock.c
index 400dc62..4618fda 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -137,8 +137,6 @@
 
 	BUG_ON(0 == size);
 
-	size = memblock_align_up(size, align);
-
 	/* Pump up max_addr */
 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
 		end = memblock.current_limit;
@@ -683,13 +681,13 @@
 
 int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
 {
-	int idx = memblock_search(&memblock.reserved, base);
+	int idx = memblock_search(&memblock.memory, base);
 
 	if (idx == -1)
 		return 0;
-	return memblock.reserved.regions[idx].base <= base &&
-		(memblock.reserved.regions[idx].base +
-		 memblock.reserved.regions[idx].size) >= (base + size);
+	return memblock.memory.regions[idx].base <= base &&
+		(memblock.memory.regions[idx].base +
+		 memblock.memory.regions[idx].size) >= (base + size);
 }
 
 int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
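
The corrected memblock_is_region_memory() searches memblock.memory instead of memblock.reserved; the containment test itself is unchanged. A trivial userspace restatement of that test, with made-up addresses:

#include <stdbool.h>
#include <stdio.h>

struct region_model {
	unsigned long long base, size;
};

/* The query [base, base + size) must lie entirely inside the region the
 * search returned -- here, a memory region rather than a reserved one. */
static bool region_contains(const struct region_model *r,
			    unsigned long long base, unsigned long long size)
{
	return r->base <= base && r->base + r->size >= base + size;
}

int main(void)
{
	struct region_model mem = { .base = 0x100000ULL, .size = 0x40000000ULL };

	printf("inside : %d\n", region_contains(&mem, 0x200000ULL, 0x1000ULL));
	printf("outside: %d\n", region_contains(&mem, 0x40000000ULL, 0x200000ULL));
	return 0;
}
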
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8ab8410..da53a25 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -600,23 +600,24 @@
 }
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
-					 struct page_cgroup *pc,
-					 bool charge)
+					 bool file, int nr_pages)
 {
-	int val = (charge) ? 1 : -1;
-
 	preempt_disable();
 
-	if (PageCgroupCache(pc))
-		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val);
+	if (file)
+		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
 	else
-		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val);
+		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);
 
-	if (charge)
+	/* pagein of a big page is an event. So, ignore page size */
+	if (nr_pages > 0)
 		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
-	else
+	else {
 		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
-	__this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]);
+		nr_pages = -nr_pages; /* for event */
+	}
+
+	__this_cpu_add(mem->stat->count[MEM_CGROUP_EVENTS], nr_pages);
 
 	preempt_enable();
 }
@@ -815,7 +816,8 @@
 	 * removed from global LRU.
 	 */
 	mz = page_cgroup_zoneinfo(pc);
-	MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+	/* huge page split is done under lru_lock. so, we have no races. */
+	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
 	if (mem_cgroup_is_root(pc->mem_cgroup))
 		return;
 	VM_BUG_ON(list_empty(&pc->lru));
@@ -836,13 +838,12 @@
 		return;
 
 	pc = lookup_page_cgroup(page);
-	/*
-	 * Used bit is set without atomic ops but after smp_wmb().
-	 * For making pc->mem_cgroup visible, insert smp_rmb() here.
-	 */
-	smp_rmb();
 	/* unused or root page is not rotated. */
-	if (!PageCgroupUsed(pc) || mem_cgroup_is_root(pc->mem_cgroup))
+	if (!PageCgroupUsed(pc))
+		return;
+	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+	smp_rmb();
+	if (mem_cgroup_is_root(pc->mem_cgroup))
 		return;
 	mz = page_cgroup_zoneinfo(pc);
 	list_move(&pc->lru, &mz->lists[lru]);
@@ -857,16 +858,13 @@
 		return;
 	pc = lookup_page_cgroup(page);
 	VM_BUG_ON(PageCgroupAcctLRU(pc));
-	/*
-	 * Used bit is set without atomic ops but after smp_wmb().
-	 * For making pc->mem_cgroup visible, insert smp_rmb() here.
-	 */
-	smp_rmb();
 	if (!PageCgroupUsed(pc))
 		return;
-
+	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc);
-	MEM_CGROUP_ZSTAT(mz, lru) += 1;
+	/* huge page split is done under lru_lock. so, we have no races. */
+	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
 	SetPageCgroupAcctLRU(pc);
 	if (mem_cgroup_is_root(pc->mem_cgroup))
 		return;
@@ -1030,14 +1028,10 @@
 		return NULL;
 
 	pc = lookup_page_cgroup(page);
-	/*
-	 * Used bit is set without atomic ops but after smp_wmb().
-	 * For making pc->mem_cgroup visible, insert smp_rmb() here.
-	 */
-	smp_rmb();
 	if (!PageCgroupUsed(pc))
 		return NULL;
-
+	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc);
 	if (!mz)
 		return NULL;
@@ -1119,6 +1113,23 @@
 	return false;
 }
 
+/**
+ * mem_cgroup_check_margin - check if the memory cgroup allows charging
+ * @mem: memory cgroup to check
+ * @bytes: the number of bytes the caller intends to charge
+ *
+ * Returns a boolean value on whether @mem can be charged @bytes or
+ * whether this would exceed the limit.
+ */
+static bool mem_cgroup_check_margin(struct mem_cgroup *mem, unsigned long bytes)
+{
+	if (!res_counter_check_margin(&mem->res, bytes))
+		return false;
+	if (do_swap_account && !res_counter_check_margin(&mem->memsw, bytes))
+		return false;
+	return true;
+}
+
 static unsigned int get_swappiness(struct mem_cgroup *memcg)
 {
 	struct cgroup *cgrp = memcg->css.cgroup;
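
mem_cgroup_check_margin() above asks whether both the memory counter and, with swap accounting enabled, the mem+swap counter still have enough headroom for the intended charge. A userspace model of that double check; the counter values are invented:

#include <stdbool.h>
#include <stdio.h>

struct counter_model {
	unsigned long long usage, limit;
};

/* res_counter_check_margin() essentially asks: limit - usage >= bytes. */
static bool has_margin(const struct counter_model *c, unsigned long long bytes)
{
	return c->limit - c->usage >= bytes;
}

static bool can_charge(const struct counter_model *res,
		       const struct counter_model *memsw,
		       bool swap_accounting, unsigned long long bytes)
{
	if (!has_margin(res, bytes))
		return false;
	if (swap_accounting && !has_margin(memsw, bytes))
		return false;
	return true;
}

int main(void)
{
	struct counter_model res   = { .usage = 90ULL << 20, .limit = 100ULL << 20 };
	struct counter_model memsw = { .usage = 99ULL << 20, .limit = 100ULL << 20 };

	printf("4k page     : %d\n", can_charge(&res, &memsw, true, 4096));
	printf("2M huge page: %d\n", can_charge(&res, &memsw, true, 2ULL << 20));
	return 0;
}
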
@@ -1615,7 +1626,7 @@
 	if (unlikely(!mem || !PageCgroupUsed(pc)))
 		goto out;
 	/* pc->mem_cgroup is unstable ? */
-	if (unlikely(mem_cgroup_stealed(mem))) {
+	if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) {
 		/* take a lock against to access pc->mem_cgroup */
 		move_lock_page_cgroup(pc, &flags);
 		need_unlock = true;
@@ -1840,27 +1851,39 @@
 		if (likely(!ret))
 			return CHARGE_OK;
 
+		res_counter_uncharge(&mem->res, csize);
 		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
 		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
 	} else
 		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
-
-	if (csize > PAGE_SIZE) /* change csize and retry */
+	/*
+	 * csize can be either a huge page (HPAGE_SIZE), a batch of
+	 * regular pages (CHARGE_SIZE), or a single regular page
+	 * (PAGE_SIZE).
+	 *
+	 * Never reclaim on behalf of optional batching, retry with a
+	 * single page instead.
+	 */
+	if (csize == CHARGE_SIZE)
 		return CHARGE_RETRY;
 
 	if (!(gfp_mask & __GFP_WAIT))
 		return CHARGE_WOULDBLOCK;
 
 	ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
-					gfp_mask, flags);
+					      gfp_mask, flags);
+	if (mem_cgroup_check_margin(mem_over_limit, csize))
+		return CHARGE_RETRY;
 	/*
-	 * try_to_free_mem_cgroup_pages() might not give us a full
-	 * picture of reclaim. Some pages are reclaimed and might be
-	 * moved to swap cache or just unmapped from the cgroup.
-	 * Check the limit again to see if the reclaim reduced the
-	 * current usage of the cgroup before giving up
+	 * Even though the limit is exceeded at this point, reclaim
+	 * may have been able to free some pages.  Retry the charge
+	 * before killing the task.
+	 *
+	 * Only for regular pages, though: huge pages are rather
+	 * unlikely to succeed so close to the limit, and we fall back
+	 * to regular pages anyway in case of failure.
 	 */
-	if (ret || mem_cgroup_check_under_limit(mem_over_limit))
+	if (csize == PAGE_SIZE && ret)
 		return CHARGE_RETRY;
 
 	/*
@@ -2084,14 +2107,27 @@
 	return mem;
 }
 
-/*
- * commit a charge got by __mem_cgroup_try_charge() and makes page_cgroup to be
- * USED state. If already USED, uncharge and return.
- */
-static void ____mem_cgroup_commit_charge(struct mem_cgroup *mem,
-					 struct page_cgroup *pc,
-					 enum charge_type ctype)
+static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
+				       struct page_cgroup *pc,
+				       enum charge_type ctype,
+				       int page_size)
 {
+	int nr_pages = page_size >> PAGE_SHIFT;
+
+	/* try_charge() can return NULL to *memcg, taking care of it. */
+	if (!mem)
+		return;
+
+	lock_page_cgroup(pc);
+	if (unlikely(PageCgroupUsed(pc))) {
+		unlock_page_cgroup(pc);
+		mem_cgroup_cancel_charge(mem, page_size);
+		return;
+	}
+	/*
+	 * we don't need page_cgroup_lock for tail pages, because they are not
+	 * accessed by any other context at this point.
+	 */
 	pc->mem_cgroup = mem;
 	/*
 	 * We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2115,35 +2151,7 @@
 		break;
 	}
 
-	mem_cgroup_charge_statistics(mem, pc, true);
-}
-
-static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
-				       struct page_cgroup *pc,
-				       enum charge_type ctype,
-				       int page_size)
-{
-	int i;
-	int count = page_size >> PAGE_SHIFT;
-
-	/* try_charge() can return NULL to *memcg, taking care of it. */
-	if (!mem)
-		return;
-
-	lock_page_cgroup(pc);
-	if (unlikely(PageCgroupUsed(pc))) {
-		unlock_page_cgroup(pc);
-		mem_cgroup_cancel_charge(mem, page_size);
-		return;
-	}
-
-	/*
-	 * we don't need page_cgroup_lock about tail pages, becase they are not
-	 * accessed by any other context at this point.
-	 */
-	for (i = 0; i < count; i++)
-		____mem_cgroup_commit_charge(mem, pc + i, ctype);
-
+	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages);
 	unlock_page_cgroup(pc);
 	/*
 	 * "charge_statistics" updated event counter. Then, check it.
@@ -2153,6 +2161,48 @@
 	memcg_check_events(mem, pc->page);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+
+#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
+			(1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
+/*
+ * Because tail pages are not marked as "used", set that flag here.  We're
+ * under zone->lru_lock, 'splitting on pmd' and compound_lock.
+ */
+void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
+{
+	struct page_cgroup *head_pc = lookup_page_cgroup(head);
+	struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
+	unsigned long flags;
+
+	if (mem_cgroup_disabled())
+		return;
+	/*
+	 * We have no races with charge/uncharge but will have races with
+	 * page state accounting.
+	 */
+	move_lock_page_cgroup(head_pc, &flags);
+
+	tail_pc->mem_cgroup = head_pc->mem_cgroup;
+	smp_wmb(); /* see __commit_charge() */
+	if (PageCgroupAcctLRU(head_pc)) {
+		enum lru_list lru;
+		struct mem_cgroup_per_zone *mz;
+
+		/*
+		 * LRU flags cannot be copied because we need to add the tail
+		 * page to the LRU by the generic call, so our hook will be
+		 * called.  We hold lru_lock, so reduce the counter directly.
+		 */
+		lru = page_lru(head);
+		mz = page_cgroup_zoneinfo(head_pc);
+		MEM_CGROUP_ZSTAT(mz, lru) -= 1;
+	}
+	tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
+	move_unlock_page_cgroup(head_pc, &flags);
+}
+#endif
+
 /**
  * __mem_cgroup_move_account - move account of the page
  * @pc:	page_cgroup of the page.
@@ -2171,8 +2221,11 @@
  */
 
 static void __mem_cgroup_move_account(struct page_cgroup *pc,
-	struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
+	struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge,
+	int charge_size)
 {
+	int nr_pages = charge_size >> PAGE_SHIFT;
+
 	VM_BUG_ON(from == to);
 	VM_BUG_ON(PageLRU(pc->page));
 	VM_BUG_ON(!page_is_cgroup_locked(pc));
@@ -2186,14 +2239,14 @@
 		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
 		preempt_enable();
 	}
-	mem_cgroup_charge_statistics(from, pc, false);
+	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
 	if (uncharge)
 		/* This is not "cancel", but cancel_charge does all we need. */
-		mem_cgroup_cancel_charge(from, PAGE_SIZE);
+		mem_cgroup_cancel_charge(from, charge_size);
 
 	/* caller should have done css_get */
 	pc->mem_cgroup = to;
-	mem_cgroup_charge_statistics(to, pc, true);
+	mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
 	/*
 	 * We charges against "to" which may not have any tasks. Then, "to"
 	 * can be under rmdir(). But in current implementation, caller of
@@ -2208,15 +2261,24 @@
  * __mem_cgroup_move_account()
  */
 static int mem_cgroup_move_account(struct page_cgroup *pc,
-		struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
+		struct mem_cgroup *from, struct mem_cgroup *to,
+		bool uncharge, int charge_size)
 {
 	int ret = -EINVAL;
 	unsigned long flags;
+	/*
+	 * The page is isolated from the LRU, so the collapse function
+	 * will not handle it.  But page splitting can still happen, so
+	 * do this check under compound_page_lock(), which the caller
+	 * should hold.
+	 */
+	if ((charge_size > PAGE_SIZE) && !PageTransHuge(pc->page))
+		return -EBUSY;
 
 	lock_page_cgroup(pc);
 	if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
 		move_lock_page_cgroup(pc, &flags);
-		__mem_cgroup_move_account(pc, from, to, uncharge);
+		__mem_cgroup_move_account(pc, from, to, uncharge, charge_size);
 		move_unlock_page_cgroup(pc, &flags);
 		ret = 0;
 	}
@@ -2241,6 +2303,8 @@
 	struct cgroup *cg = child->css.cgroup;
 	struct cgroup *pcg = cg->parent;
 	struct mem_cgroup *parent;
+	int page_size = PAGE_SIZE;
+	unsigned long flags;
 	int ret;
 
 	/* Is ROOT ? */
@@ -2253,15 +2317,24 @@
 	if (isolate_lru_page(page))
 		goto put;
 
+	if (PageTransHuge(page))
+		page_size = HPAGE_SIZE;
+
 	parent = mem_cgroup_from_cont(pcg);
-	ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false,
-				      PAGE_SIZE);
+	ret = __mem_cgroup_try_charge(NULL, gfp_mask,
+				&parent, false, page_size);
 	if (ret || !parent)
 		goto put_back;
 
-	ret = mem_cgroup_move_account(pc, child, parent, true);
+	if (page_size > PAGE_SIZE)
+		flags = compound_lock_irqsave(page);
+
+	ret = mem_cgroup_move_account(pc, child, parent, true, page_size);
 	if (ret)
-		mem_cgroup_cancel_charge(parent, PAGE_SIZE);
+		mem_cgroup_cancel_charge(parent, page_size);
+
+	if (page_size > PAGE_SIZE)
+		compound_unlock_irqrestore(page, flags);
 put_back:
 	putback_lru_page(page);
 put:
@@ -2280,13 +2353,19 @@
 				gfp_t gfp_mask, enum charge_type ctype)
 {
 	struct mem_cgroup *mem = NULL;
-	struct page_cgroup *pc;
-	int ret;
 	int page_size = PAGE_SIZE;
+	struct page_cgroup *pc;
+	bool oom = true;
+	int ret;
 
 	if (PageTransHuge(page)) {
 		page_size <<= compound_order(page);
 		VM_BUG_ON(!PageTransHuge(page));
+		/*
+		 * Never OOM-kill a process for a huge page.  The
+		 * fault handler will fall back to regular pages.
+		 */
+		oom = false;
 	}
 
 	pc = lookup_page_cgroup(page);
@@ -2295,7 +2374,7 @@
 		return 0;
 	prefetchw(pc);
 
-	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true, page_size);
+	ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, oom, page_size);
 	if (ret || !mem)
 		return ret;
 
@@ -2546,7 +2625,6 @@
 static struct mem_cgroup *
 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 {
-	int i;
 	int count;
 	struct page_cgroup *pc;
 	struct mem_cgroup *mem = NULL;
@@ -2596,8 +2674,7 @@
 		break;
 	}
 
-	for (i = 0; i < count; i++)
-		mem_cgroup_charge_statistics(mem, pc + i, false);
+	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -count);
 
 	ClearPageCgroupUsed(pc);
 	/*
@@ -4844,7 +4921,7 @@
 				goto put;
 			pc = lookup_page_cgroup(page);
 			if (!mem_cgroup_move_account(pc,
-						mc.from, mc.to, false)) {
+					mc.from, mc.to, false, PAGE_SIZE)) {
 				mc.precharge--;
 				/* we uncharge from mc.from later. */
 				mc.moved_charge++;
@@ -4983,9 +5060,9 @@
 static int __init enable_swap_account(char *s)
 {
 	/* consider enabled if no parameter or 1 is given */
-	if (!s || !strcmp(s, "1"))
+	if (!(*s) || !strcmp(s, "=1"))
 		really_do_swap_account = 1;
-	else if (!strcmp(s, "0"))
+	else if (!strcmp(s, "=0"))
 		really_do_swap_account = 0;
 	return 1;
 }
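
The parsing change above accounts for how __setup("swapaccount", ...) hands its handler the remainder of the boot option including the '=': a bare "swapaccount" arrives as an empty string, "swapaccount=1" as "=1", and so on. A userspace model of just the string handling:

#include <stdio.h>
#include <string.h>

static int really_do_swap_account = -1;

/* Mirrors the fixed handler: empty string or "=1" enables, "=0" disables. */
static int enable_swap_account(const char *s)
{
	if (!*s || !strcmp(s, "=1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "=0"))
		really_do_swap_account = 0;
	return 1;
}

int main(void)
{
	enable_swap_account("=0");	/* swapaccount=0 */
	printf("swapaccount=0 -> %d\n", really_do_swap_account);

	enable_swap_account("");	/* bare "swapaccount" */
	printf("swapaccount   -> %d\n", really_do_swap_account);
	return 0;
}
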
@@ -4993,7 +5070,8 @@
 
 static int __init disable_swap_account(char *s)
 {
-	enable_swap_account("0");
+	printk_once("noswapaccount is deprecated and will be removed in 2.6.40. Use swapaccount=0 instead\n");
+	enable_swap_account("=0");
 	return 1;
 }
 __setup("noswapaccount", disable_swap_account);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 548fbd7..0207c2f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -233,8 +233,8 @@
 	}
 
 	/*
-	 * Only all shrink_slab here (which would also
-	 * shrink other caches) if access is not potentially fatal.
+	 * Only call shrink_slab here (which would also shrink other caches) if
+	 * access is not potentially fatal.
 	 */
 	if (access) {
 		int nr;
@@ -386,8 +386,6 @@
 	struct task_struct *tsk;
 	struct anon_vma *av;
 
-	if (!PageHuge(page) && unlikely(split_huge_page(page)))
-		return;
 	read_lock(&tasklist_lock);
 	av = page_lock_anon_vma(page);
 	if (av == NULL)	/* Not actually mapped anymore */
@@ -856,6 +854,7 @@
 	int ret;
 	int kill = 1;
 	struct page *hpage = compound_head(p);
+	struct page *ppage;
 
 	if (PageReserved(p) || PageSlab(p))
 		return SWAP_SUCCESS;
@@ -897,6 +896,44 @@
 	}
 
 	/*
+	 * ppage: poisoned page
+	 *   if p is a regular (4k) page,
+	 *        ppage == the real poisoned page;
+	 *   else p is hugetlb or THP, and ppage == the head page.
+	 */
+	ppage = hpage;
+
+	if (PageTransHuge(hpage)) {
+		/*
+		 * Verify that this isn't a hugetlbfs head page; the check for
+		 * PageAnon is just to avoid tripping a split_huge_page
+		 * internal debug check, as split_huge_page refuses to deal
+		 * with anything that isn't an anon page.  PageAnon can't go
+		 * away from under us because we hold a refcount on the hpage;
+		 * without a refcount on the hpage, split_huge_page can't be
+		 * safely called in the first place, and having a refcount on
+		 * the tail isn't enough to be safe.
+		 */
+		if (!PageHuge(hpage) && PageAnon(hpage)) {
+			if (unlikely(split_huge_page(hpage))) {
+				/*
+				 * FIXME: if splitting THP is failed, it is
+				 * better to stop the following operation rather
+				 * than causing panic by unmapping. System might
+				 * survive if the page is freed later.
+				 */
+				printk(KERN_INFO
+					"MCE %#lx: failed to split THP\n", pfn);
+
+				BUG_ON(!PageHWPoison(p));
+				return SWAP_FAIL;
+			}
+			/* THP is split, so ppage should be the real poisoned page. */
+			ppage = p;
+		}
+	}
+
+	/*
 	 * First collect all the processes that have the page
 	 * mapped in dirty form.  This has to be done before try_to_unmap,
 	 * because ttu takes the rmap data structures down.
@@ -905,12 +942,18 @@
 	 * there's nothing that can be done.
 	 */
 	if (kill)
-		collect_procs(hpage, &tokill);
+		collect_procs(ppage, &tokill);
 
-	ret = try_to_unmap(hpage, ttu);
+	if (hpage != ppage)
+		lock_page_nosync(ppage);
+
+	ret = try_to_unmap(ppage, ttu);
 	if (ret != SWAP_SUCCESS)
 		printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n",
-				pfn, page_mapcount(hpage));
+				pfn, page_mapcount(ppage));
+
+	if (hpage != ppage)
+		unlock_page(ppage);
 
 	/*
 	 * Now that the dirty bit has been propagated to the
@@ -921,7 +964,7 @@
 	 * use a more force-full uncatchable kill to prevent
 	 * any accesses to the poisoned memory.
 	 */
-	kill_procs_ao(&tokill, !!PageDirty(hpage), trapno,
+	kill_procs_ao(&tokill, !!PageDirty(ppage), trapno,
 		      ret != SWAP_SUCCESS, p, pfn);
 
 	return ret;
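
The ppage logic added above picks the page that the unmap and kill machinery should operate on, depending on whether the poisoned page is a plain 4k page, a hugetlb page, or a THP that could (or could not) be split. A flattened, purely illustrative decision table:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flattened view of the decision: act on the head page for
 * hugetlb, on the poisoned subpage once a THP has been split, and give up
 * if a THP cannot be split. */
static const char *poison_target(bool compound, bool hugetlbfs, bool split_ok)
{
	if (!compound)
		return "the 4k page itself";
	if (hugetlbfs)
		return "the hugetlb head page";
	return split_ok ? "the poisoned subpage (THP was split)"
			: "none: fail rather than unmap an unsplit THP";
}

int main(void)
{
	printf("regular page  : %s\n", poison_target(false, false, false));
	printf("hugetlb page  : %s\n", poison_target(true, true, false));
	printf("THP, split ok : %s\n", poison_target(true, false, true));
	printf("THP, no split : %s\n", poison_target(true, false, false));
	return 0;
}
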
@@ -1022,19 +1065,22 @@
 	 * The check (unnecessarily) ignores LRU pages being isolated and
 	 * walked by the page reclaim code, however that's not a big loss.
 	 */
-	if (!PageLRU(p) && !PageHuge(p))
-		shake_page(p, 0);
-	if (!PageLRU(p) && !PageHuge(p)) {
-		/*
-		 * shake_page could have turned it free.
-		 */
-		if (is_free_buddy_page(p)) {
-			action_result(pfn, "free buddy, 2nd try", DELAYED);
-			return 0;
+	if (!PageHuge(p) && !PageTransCompound(p)) {
+		if (!PageLRU(p))
+			shake_page(p, 0);
+		if (!PageLRU(p)) {
+			/*
+			 * shake_page could have turned it free.
+			 */
+			if (is_free_buddy_page(p)) {
+				action_result(pfn, "free buddy, 2nd try",
+						DELAYED);
+				return 0;
+			}
+			action_result(pfn, "non LRU", IGNORED);
+			put_page(p);
+			return -EBUSY;
 		}
-		action_result(pfn, "non LRU", IGNORED);
-		put_page(p);
-		return -EBUSY;
 	}
 
 	/*
@@ -1064,7 +1110,7 @@
 	 * For error on the tail page, we should set PG_hwpoison
 	 * on the head page to show that the hugepage is hwpoisoned
 	 */
-	if (PageTail(p) && TestSetPageHWPoison(hpage)) {
+	if (PageHuge(p) && PageTail(p) && TestSetPageHWPoison(hpage)) {
 		action_result(pfn, "hugepage already hardware poisoned",
 				IGNORED);
 		unlock_page(hpage);
@@ -1295,7 +1341,10 @@
 	ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
 				true);
 	if (ret) {
-		putback_lru_pages(&pagelist);
+		struct page *page1, *page2;
+		list_for_each_entry_safe(page1, page2, &pagelist, lru)
+			put_page(page1);
+
 		pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
 			 pfn, ret, page->flags);
 		if (ret > 0)
@@ -1419,6 +1468,7 @@
 		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
 								0, true);
 		if (ret) {
+			putback_lru_pages(&pagelist);
 			pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
 				pfn, ret, page->flags);
 			if (ret > 0)
diff --git a/mm/memory.c b/mm/memory.c
index 31250fa..8e8c1832 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2219,7 +2219,6 @@
 							 &ptl);
 			if (!pte_same(*page_table, orig_pte)) {
 				unlock_page(old_page);
-				page_cache_release(old_page);
 				goto unlock;
 			}
 			page_cache_release(old_page);
@@ -2289,7 +2288,6 @@
 							 &ptl);
 			if (!pte_same(*page_table, orig_pte)) {
 				unlock_page(old_page);
-				page_cache_release(old_page);
 				goto unlock;
 			}
 
@@ -2367,16 +2365,6 @@
 	}
 	__SetPageUptodate(new_page);
 
-	/*
-	 * Don't let another task, with possibly unlocked vma,
-	 * keep the mlocked page.
-	 */
-	if ((vma->vm_flags & VM_LOCKED) && old_page) {
-		lock_page(old_page);	/* for LRU manipulation */
-		clear_page_mlock(old_page);
-		unlock_page(old_page);
-	}
-
 	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
 		goto oom_free_new;
 
@@ -2444,10 +2432,20 @@
 
 	if (new_page)
 		page_cache_release(new_page);
-	if (old_page)
-		page_cache_release(old_page);
 unlock:
 	pte_unmap_unlock(page_table, ptl);
+	if (old_page) {
+		/*
+		 * Don't let another task, with possibly unlocked vma,
+		 * keep the mlocked page.
+		 */
+		if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) {
+			lock_page(old_page);	/* LRU manipulation */
+			munlock_vma_page(old_page);
+			unlock_page(old_page);
+		}
+		page_cache_release(old_page);
+	}
 	return ret;
 oom_free_new:
 	page_cache_release(new_page);
@@ -3053,12 +3051,6 @@
 				goto out;
 			}
 			charged = 1;
-			/*
-			 * Don't let another task, with possibly unlocked vma,
-			 * keep the mlocked page.
-			 */
-			if (vma->vm_flags & VM_LOCKED)
-				clear_page_mlock(vmf.page);
 			copy_user_highpage(page, vmf.page, address, vma);
 			__SetPageUptodate(page);
 		} else {
diff --git a/mm/migrate.c b/mm/migrate.c
index 46fe8cc..7661152 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -772,6 +772,7 @@
 unlock:
 	unlock_page(page);
 
+move_newpage:
 	if (rc != -EAGAIN) {
  		/*
  		 * A page that has been migrated has all references
@@ -785,8 +786,6 @@
 		putback_lru_page(page);
 	}
 
-move_newpage:
-
 	/*
 	 * Move the new page to the LRU. If migration was not successful
 	 * then this will free the page.
@@ -888,7 +887,7 @@
  * are movable anymore because to has become empty
  * or no retryable pages exist anymore.
  * Caller should call putback_lru_pages to return pages to the LRU
- * or free list.
+ * or free list only if ret != 0.
  *
  * Return: Number of pages not migrated or error code.
  */
@@ -981,10 +980,6 @@
 	}
 	rc = 0;
 out:
-
-	list_for_each_entry_safe(page, page2, from, lru)
-		put_page(page);
-
 	if (rc)
 		return rc;
 
diff --git a/mm/mlock.c b/mm/mlock.c
index 13e81ee..c3924c7f 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -178,6 +178,13 @@
 	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
 		gup_flags |= FOLL_WRITE;
 
+	/*
+	 * We want mlock to succeed for regions that have any permissions
+	 * other than PROT_NONE.
+	 */
+	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
+		gup_flags |= FOLL_FORCE;
+
 	if (vma->vm_flags & VM_LOCKED)
 		gup_flags |= FOLL_MLOCK;
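
The FOLL_FORCE addition above lets mlock() fault in pages of any mapping that has some access permission, not only writable ones. A userspace model of the flag computation; all constant values here are illustrative, not the kernel's:

#include <stdio.h>

#define VM_READ		0x01	/* illustrative values only */
#define VM_WRITE	0x02
#define VM_EXEC		0x04
#define VM_SHARED	0x08
#define VM_LOCKED	0x10

#define FOLL_WRITE	0x01
#define FOLL_FORCE	0x02
#define FOLL_MLOCK	0x04

static unsigned int mlock_gup_flags(unsigned long vm_flags)
{
	unsigned int gup_flags = 0;

	if ((vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/* The fix above: any mapping with some permission (i.e. not
	 * PROT_NONE) may be forced, so mlock() succeeds on, say, a
	 * read-only region. */
	if (vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	if (vm_flags & VM_LOCKED)
		gup_flags |= FOLL_MLOCK;

	return gup_flags;
}

int main(void)
{
	printf("PROT_READ only: %#x\n", mlock_gup_flags(VM_READ | VM_LOCKED));
	printf("PROT_NONE     : %#x\n", mlock_gup_flags(VM_LOCKED));
	return 0;
}
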
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 90c1439..a873e61 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1088,8 +1088,10 @@
 		pset = per_cpu_ptr(zone->pageset, cpu);
 
 		pcp = &pset->pcp;
-		free_pcppages_bulk(zone, pcp->count, pcp);
-		pcp->count = 0;
+		if (pcp->count) {
+			free_pcppages_bulk(zone, pcp->count, pcp);
+			pcp->count = 0;
+		}
 		local_irq_restore(flags);
 	}
 }
@@ -2034,6 +2036,14 @@
 	 */
 	alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
+	/*
+	 * Find the true preferred zone if the allocation is unconstrained by
+	 * cpusets.
+	 */
+	if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
+		first_zones_zonelist(zonelist, high_zoneidx, NULL,
+					&preferred_zone);
+
 	/* This is the last chance, in general, before the goto nopage. */
 	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
 			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -2192,7 +2202,9 @@
 
 	get_mems_allowed();
 	/* The preferred zone is used for statistics later */
-	first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
+	first_zones_zonelist(zonelist, high_zoneidx,
+				nodemask ? : &cpuset_current_mems_allowed,
+				&preferred_zone);
 	if (!preferred_zone) {
 		put_mems_allowed();
 		return NULL;
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 0369f5b..eb663fb 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -6,6 +6,7 @@
  *  Copyright (C) 2010  Linus Torvalds
  */
 
+#include <linux/pagemap.h>
 #include <asm/tlb.h>
 #include <asm-generic/pgtable.h>
 
diff --git a/mm/truncate.c b/mm/truncate.c
index 3c2d5dd..49feb46 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -549,13 +549,12 @@
  * @inode: inode
  * @newsize: new file size
  *
- * truncate_setsize updastes i_size update and performs pagecache
- * truncation (if necessary) for a file size updates. It will be
- * typically be called from the filesystem's setattr function when
- * ATTR_SIZE is passed in.
+ * truncate_setsize updates i_size and performs pagecache truncation (if
+ * necessary) to @newsize. It will typically be called from the filesystem's
+ * setattr function when ATTR_SIZE is passed in.
  *
- * Must be called with inode_mutex held and after all filesystem
- * specific block truncation has been performed.
+ * Must be called with inode_mutex held and before all filesystem specific
+ * block truncation has been performed.
  */
 void truncate_setsize(struct inode *inode, loff_t newsize)
 {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 47a5096..17497d0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -41,7 +41,6 @@
 #include <linux/memcontrol.h>
 #include <linux/delayacct.h>
 #include <linux/sysctl.h>
-#include <linux/compaction.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -1883,12 +1882,12 @@
 	unsigned long nr[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
 	enum lru_list l;
-	unsigned long nr_reclaimed;
+	unsigned long nr_reclaimed, nr_scanned;
 	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
-	unsigned long nr_scanned = sc->nr_scanned;
 
 restart:
 	nr_reclaimed = 0;
+	nr_scanned = sc->nr_scanned;
 	get_scan_count(zone, sc, nr, priority);
 
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -2084,7 +2083,8 @@
 			struct zone *preferred_zone;
 
 			first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
-							NULL, &preferred_zone);
+						&cpuset_current_mems_allowed,
+						&preferred_zone);
 			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
 		}
 	}
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 6e64f7c..7850412 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -327,7 +327,7 @@
 static void vlan_transfer_features(struct net_device *dev,
 				   struct net_device *vlandev)
 {
-	unsigned long old_features = vlandev->features;
+	u32 old_features = vlandev->features;
 
 	vlandev->features &= ~dev->vlan_features;
 	vlandev->features |= dev->features & dev->vlan_features;
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 17c5ba7..29a54cc 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -59,7 +59,6 @@
 						 * safely advertise a maxsize
 						 * of 64k */
 
-#define P9_RDMA_MAX_SGE (P9_RDMA_MAXSIZE >> PAGE_SHIFT)
 /**
  * struct p9_trans_rdma - RDMA transport instance
  *
diff --git a/net/Kconfig b/net/Kconfig
index 7284062..79cabf1 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -221,6 +221,12 @@
 	depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
 	default y
 
+config RFS_ACCEL
+	boolean
+	depends on RPS && GENERIC_HARDIRQS
+	select CPU_RMAP
+	default y
+
 config XPS
 	boolean
 	depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index d936aec..2de93d0 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -1,5 +1,5 @@
 #
-# Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+# Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 #
 # Marek Lindner, Simon Wunderlich
 #
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
index 3850a3e..1997725 100644
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
index 71a91b3..6ce305b 100644
--- a/net/batman-adv/aggregation.h
+++ b/net/batman-adv/aggregation.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index 0ae81d0..0e9d435 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -52,7 +52,6 @@
 
 static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
 {
-	int printed_len;
 	va_list args;
 	static char debug_log_buf[256];
 	char *p;
@@ -62,8 +61,7 @@
 
 	spin_lock_bh(&debug_log->lock);
 	va_start(args, fmt);
-	printed_len = vscnprintf(debug_log_buf, sizeof(debug_log_buf),
-				 fmt, args);
+	vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
 	va_end(args);
 
 	for (p = debug_log_buf; *p != 0; p++)
diff --git a/net/batman-adv/bat_debugfs.h b/net/batman-adv/bat_debugfs.h
index 72df532..bc9cda3 100644
--- a/net/batman-adv/bat_debugfs.h
+++ b/net/batman-adv/bat_debugfs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index cd7bb51..f7b93a0 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/bat_sysfs.h b/net/batman-adv/bat_sysfs.h
index 7f186c0..02f1fa7 100644
--- a/net/batman-adv/bat_sysfs.h
+++ b/net/batman-adv/bat_sysfs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index bbcd8f7..ad2ca92 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index ac54017..769c246 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 0065ffb..429a013 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 4585e65..2aa4391 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index b962982..50d3a59 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index 5e728d0..55e527a 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 4f95777..f2131f4 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -34,6 +34,12 @@
 /* protect update critical side of if_list - but not the content */
 static DEFINE_SPINLOCK(if_list_lock);
 
+
+static int batman_skb_recv(struct sk_buff *skb,
+			   struct net_device *dev,
+			   struct packet_type *ptype,
+			   struct net_device *orig_dev);
+
 static void hardif_free_rcu(struct rcu_head *rcu)
 {
 	struct batman_if *batman_if;
@@ -549,8 +555,9 @@
 
 /* receive a packet with the batman ethertype coming on a hard
  * interface */
-int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
-	struct packet_type *ptype, struct net_device *orig_dev)
+static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+			   struct packet_type *ptype,
+			   struct net_device *orig_dev)
 {
 	struct bat_priv *bat_priv;
 	struct batman_packet *batman_packet;
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 30ec3b8..ad19543 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -35,10 +35,6 @@
 int hardif_enable_interface(struct batman_if *batman_if, char *iface_name);
 void hardif_disable_interface(struct batman_if *batman_if);
 void hardif_remove_interfaces(void);
-int batman_skb_recv(struct sk_buff *skb,
-				struct net_device *dev,
-				struct packet_type *ptype,
-				struct net_device *orig_dev);
 int hardif_min_mtu(struct net_device *soft_iface);
 void update_min_mtu(struct net_device *soft_iface);
 
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 26e623e..fa26939 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 09216ad..eae2440 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2006-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2006-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
@@ -49,11 +49,6 @@
 /* allocates and clears the hash */
 struct hashtable_t *hash_new(int size);
 
-/* remove element if you already found the element you want to delete and don't
- * need the overhead to find it again with hash_remove().  But usually, you
- * don't want to use this function, as it fiddles with hash-internals. */
-void *hash_remove_element(struct hashtable_t *hash, struct element_t *elem);
-
 /* free only the hashtable and the hash itself. */
 void hash_destroy(struct hashtable_t *hash);
 
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index ecf6d7f..319a7cc 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -24,7 +24,6 @@
 #include <linux/slab.h>
 #include "icmp_socket.h"
 #include "send.h"
-#include "types.h"
 #include "hash.h"
 #include "originator.h"
 #include "hard-interface.h"
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index bf9b348..462b190 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -22,8 +22,6 @@
 #ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_
 #define _NET_BATMAN_ADV_ICMP_SOCKET_H_
 
-#include "types.h"
-
 #define ICMP_SOCKET "socket"
 
 void bat_socket_init(void);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index b827f6a..06d956c 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -30,7 +30,6 @@
 #include "translation-table.h"
 #include "hard-interface.h"
 #include "gateway_client.h"
-#include "types.h"
 #include "vis.h"
 #include "hash.h"
 
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index d4d9926..e235d7b 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -22,9 +22,6 @@
 #ifndef _NET_BATMAN_ADV_MAIN_H_
 #define _NET_BATMAN_ADV_MAIN_H_
 
-/* Kernel Programming */
-#define LINUX
-
 #define DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
 		      "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
 #define DRIVER_DESC   "B.A.T.M.A.N. advanced"
@@ -54,7 +51,6 @@
 
 #define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
 
-#define PACKBUFF_SIZE 2000
 #define LOG_BUF_LEN 8192	  /* has to be a power of 2 */
 
 #define VIS_INTERVAL 5000	/* 5 seconds */
@@ -96,15 +92,11 @@
 #define DBG_ROUTES 2	/* route or hna added / changed / deleted */
 #define DBG_ALL 3
 
-#define LOG_BUF_LEN 8192          /* has to be a power of 2 */
-
 
 /*
  *  Vis
  */
 
-/* #define VIS_SUBCLUSTERS_DISABLED */
-
 /*
  * Kernel headers
  */
@@ -151,20 +143,13 @@
 	}							\
 	while (0)
 #else /* !CONFIG_BATMAN_ADV_DEBUG */
-static inline void bat_dbg(char type __attribute__((unused)),
-			   struct bat_priv *bat_priv __attribute__((unused)),
-			   char *fmt __attribute__((unused)), ...)
+static inline void bat_dbg(char type __always_unused,
+			   struct bat_priv *bat_priv __always_unused,
+			   char *fmt __always_unused, ...)
 {
 }
 #endif
 
-#define bat_warning(net_dev, fmt, arg...)				\
-	do {								\
-		struct net_device *_netdev = (net_dev);                 \
-		struct bat_priv *_batpriv = netdev_priv(_netdev);       \
-		bat_dbg(DBG_ALL, _batpriv, fmt, ## arg);		\
-		pr_warning("%s: " fmt, _netdev->name, ## arg);		\
-	} while (0)
 #define bat_info(net_dev, fmt, arg...)					\
 	do {								\
 		struct net_device *_netdev = (net_dev);                 \
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 6b7fb6b..54863c9 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -247,7 +247,7 @@
 				      orig_node->hna_buff_len);
 			/* update bonding candidates, we could have lost
 			 * some candidates. */
-			update_bonding_candidates(bat_priv, orig_node);
+			update_bonding_candidates(orig_node);
 		}
 	}
 
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index d474ceb..8019fbd 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index b49fdf7..e757187 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -50,6 +50,7 @@
 
 /* fragmentation defines */
 #define UNI_FRAG_HEAD 0x01
+#define UNI_FRAG_LARGETAIL 0x02
 
 struct batman_packet {
 	uint8_t  packet_type;
@@ -63,7 +64,7 @@
 	uint8_t  num_hna;
 	uint8_t  gw_flags;  /* flags related to gateway class */
 	uint8_t  align;
-} __attribute__((packed));
+} __packed;
 
 #define BAT_PACKET_LEN sizeof(struct batman_packet)
 
@@ -76,7 +77,7 @@
 	uint8_t  orig[6];
 	uint16_t seqno;
 	uint8_t  uid;
-} __attribute__((packed));
+} __packed;
 
 #define BAT_RR_LEN 16
 
@@ -93,14 +94,14 @@
 	uint8_t  uid;
 	uint8_t  rr_cur;
 	uint8_t  rr[BAT_RR_LEN][ETH_ALEN];
-} __attribute__((packed));
+} __packed;
 
 struct unicast_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
 	uint8_t  dest[6];
 	uint8_t  ttl;
-} __attribute__((packed));
+} __packed;
 
 struct unicast_frag_packet {
 	uint8_t  packet_type;
@@ -110,7 +111,7 @@
 	uint8_t  flags;
 	uint8_t  orig[6];
 	uint16_t seqno;
-} __attribute__((packed));
+} __packed;
 
 struct bcast_packet {
 	uint8_t  packet_type;
@@ -118,7 +119,7 @@
 	uint8_t  orig[6];
 	uint8_t  ttl;
 	uint32_t seqno;
-} __attribute__((packed));
+} __packed;
 
 struct vis_packet {
 	uint8_t  packet_type;
@@ -131,6 +132,6 @@
 				  * neighbors */
 	uint8_t  target_orig[6]; /* who should receive this packet */
 	uint8_t  sender_orig[6]; /* who sent or rebroadcasted this packet */
-} __attribute__((packed));
+} __packed;
 
 #endif /* _NET_BATMAN_ADV_PACKET_H_ */
diff --git a/net/batman-adv/ring_buffer.c b/net/batman-adv/ring_buffer.c
index defd37c..5bb6a61 100644
--- a/net/batman-adv/ring_buffer.c
+++ b/net/batman-adv/ring_buffer.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/ring_buffer.h b/net/batman-adv/ring_buffer.h
index 6b0cb9aa..0395b27 100644
--- a/net/batman-adv/ring_buffer.h
+++ b/net/batman-adv/ring_buffer.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 8828edd..8274140 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -28,7 +28,6 @@
 #include "icmp_socket.h"
 #include "translation-table.h"
 #include "originator.h"
-#include "types.h"
 #include "ring_buffer.h"
 #include "vis.h"
 #include "aggregation.h"
@@ -433,8 +432,7 @@
 }
 
 /* copy primary address for bonding */
-static void mark_bonding_address(struct bat_priv *bat_priv,
-				 struct orig_node *orig_node,
+static void mark_bonding_address(struct orig_node *orig_node,
 				 struct orig_node *orig_neigh_node,
 				 struct batman_packet *batman_packet)
 
@@ -447,8 +445,7 @@
 }
 
 /* mark possible bond.candidates in the neighbor list */
-void update_bonding_candidates(struct bat_priv *bat_priv,
-			       struct orig_node *orig_node)
+void update_bonding_candidates(struct orig_node *orig_node)
 {
 	int candidates;
 	int interference_candidate;
@@ -730,9 +727,8 @@
 		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
 			    if_incoming, hna_buff, hna_buff_len, is_duplicate);
 
-	mark_bonding_address(bat_priv, orig_node,
-			     orig_neigh_node, batman_packet);
-	update_bonding_candidates(bat_priv, orig_node);
+	mark_bonding_address(orig_node, orig_neigh_node, batman_packet);
+	update_bonding_candidates(orig_node);
 
 	/* is single hop (direct) neighbor */
 	if (is_single_hop_neigh) {
@@ -810,13 +806,11 @@
 {
 	struct orig_node *orig_node;
 	struct icmp_packet_rr *icmp_packet;
-	struct ethhdr *ethhdr;
 	struct batman_if *batman_if;
 	int ret;
 	uint8_t dstaddr[ETH_ALEN];
 
 	icmp_packet = (struct icmp_packet_rr *)skb->data;
-	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
 	/* add data to device queue */
 	if (icmp_packet->msg_type != ECHO_REQUEST) {
@@ -848,7 +842,6 @@
 			return NET_RX_DROP;
 
 		icmp_packet = (struct icmp_packet_rr *)skb->data;
-		ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
 		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
 		memcpy(icmp_packet->orig,
@@ -866,17 +859,15 @@
 }
 
 static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
-				  struct sk_buff *skb, size_t icmp_len)
+				  struct sk_buff *skb)
 {
 	struct orig_node *orig_node;
 	struct icmp_packet *icmp_packet;
-	struct ethhdr *ethhdr;
 	struct batman_if *batman_if;
 	int ret;
 	uint8_t dstaddr[ETH_ALEN];
 
 	icmp_packet = (struct icmp_packet *)skb->data;
-	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
 	/* send TTL exceeded if packet is an echo request (traceroute) */
 	if (icmp_packet->msg_type != ECHO_REQUEST) {
@@ -909,7 +900,6 @@
 			return NET_RX_DROP;
 
 		icmp_packet = (struct icmp_packet *) skb->data;
-		ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
 		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
 		memcpy(icmp_packet->orig,
@@ -978,7 +968,7 @@
 
 	/* TTL exceeded */
 	if (icmp_packet->ttl < 2)
-		return recv_icmp_ttl_exceeded(bat_priv, skb, hdr_size);
+		return recv_icmp_ttl_exceeded(bat_priv, skb);
 
 	ret = NET_RX_DROP;
 
@@ -1001,7 +991,6 @@
 			return NET_RX_DROP;
 
 		icmp_packet = (struct icmp_packet_rr *)skb->data;
-		ethhdr = (struct ethhdr *)skb_mac_header(skb);
 
 		/* decrement ttl */
 		icmp_packet->ttl--;
@@ -1193,7 +1182,7 @@
 				     dstaddr);
 
 	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
-	    2 * skb->len - hdr_size <= batman_if->net_dev->mtu) {
+	    frag_can_reassemble(skb, batman_if->net_dev->mtu)) {
 
 		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
 
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index f108f23..a09d16f 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -22,8 +22,6 @@
 #ifndef _NET_BATMAN_ADV_ROUTING_H_
 #define _NET_BATMAN_ADV_ROUTING_H_
 
-#include "types.h"
-
 void slide_own_bcast_window(struct batman_if *batman_if);
 void receive_bat_packet(struct ethhdr *ethhdr,
 				struct batman_packet *batman_packet,
@@ -42,7 +40,6 @@
 int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
 struct neigh_node *find_router(struct bat_priv *bat_priv,
 		struct orig_node *orig_node, struct batman_if *recv_if);
-void update_bonding_candidates(struct bat_priv *bat_priv,
-			       struct orig_node *orig_node);
+void update_bonding_candidates(struct orig_node *orig_node);
 
 #endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index b89b9f7..8314276 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -25,7 +25,6 @@
 #include "translation-table.h"
 #include "soft-interface.h"
 #include "hard-interface.h"
-#include "types.h"
 #include "vis.h"
 #include "aggregation.h"
 #include "gateway_common.h"
@@ -49,7 +48,7 @@
 }
 
 /* when do we schedule a forwarded packet to be sent */
-static unsigned long forward_send_time(struct bat_priv *bat_priv)
+static unsigned long forward_send_time(void)
 {
 	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
 }
@@ -356,7 +355,7 @@
 	else
 		batman_packet->flags &= ~DIRECTLINK;
 
-	send_time = forward_send_time(bat_priv);
+	send_time = forward_send_time();
 	add_bat_packet_to_list(bat_priv,
 			       (unsigned char *)batman_packet,
 			       sizeof(struct batman_packet) + hna_buff_len,
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index c4cefa8..b68c272 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -22,8 +22,6 @@
 #ifndef _NET_BATMAN_ADV_SEND_H_
 #define _NET_BATMAN_ADV_SEND_H_
 
-#include "types.h"
-
 int send_skb_packet(struct sk_buff *skb,
 				struct batman_if *batman_if,
 				uint8_t *dst_addr);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index e89ede1..bd088f8 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -26,7 +26,6 @@
 #include "send.h"
 #include "bat_debugfs.h"
 #include "translation-table.h"
-#include "types.h"
 #include "hash.h"
 #include "gateway_common.h"
 #include "gateway_client.h"
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 02b7733..e7b0e1a 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index a633b5a4..7fb6726 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -22,7 +22,6 @@
 #include "main.h"
 #include "translation-table.h"
 #include "soft-interface.h"
-#include "types.h"
 #include "hash.h"
 #include "originator.h"
 
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 10c4c5c..f19931c 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -22,8 +22,6 @@
 #ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 
-#include "types.h"
-
 int hna_local_init(struct bat_priv *bat_priv);
 void hna_local_add(struct net_device *soft_iface, uint8_t *addr);
 void hna_local_remove(struct bat_priv *bat_priv,
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 97cb23d..7270405 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -246,13 +246,13 @@
 	/* this packet might be part of the vis send queue. */
 	struct sk_buff *skb_packet;
 	/* vis_info may follow here*/
-} __attribute__((packed));
+} __packed;
 
 struct vis_info_entry {
 	uint8_t  src[ETH_ALEN];
 	uint8_t  dest[ETH_ALEN];
 	uint8_t  quality;	/* quality = 0 means HNA */
-} __attribute__((packed));
+} __packed;
 
 struct recvlist_node {
 	struct list_head list;
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index dc2e28b..121b11d 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Andreas Langer
  *
@@ -39,8 +39,8 @@
 		(struct unicast_frag_packet *)skb->data;
 	struct sk_buff *tmp_skb;
 	struct unicast_packet *unicast_packet;
-	int hdr_len = sizeof(struct unicast_packet),
-	    uni_diff = sizeof(struct unicast_frag_packet) - hdr_len;
+	int hdr_len = sizeof(struct unicast_packet);
+	int uni_diff = sizeof(struct unicast_frag_packet) - hdr_len;
 
 	/* set skb to the first part and tmp_skb to the second part */
 	if (up->flags & UNI_FRAG_HEAD) {
@@ -50,12 +50,12 @@
 		skb = tfp->skb;
 	}
 
+	if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
+		goto err;
+
 	skb_pull(tmp_skb, sizeof(struct unicast_frag_packet));
-	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) {
-		/* free buffered skb, skb will be freed later */
-		kfree_skb(tfp->skb);
-		return NULL;
-	}
+	if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
+		goto err;
 
 	/* move free entry to end */
 	tfp->skb = NULL;
@@ -70,6 +70,11 @@
 	unicast_packet->packet_type = BAT_UNICAST;
 
 	return skb;
+
+err:
+	/* free buffered skb, skb will be freed later */
+	kfree_skb(tfp->skb);
+	return NULL;
 }
 
 static void frag_create_entry(struct list_head *head, struct sk_buff *skb)
@@ -224,16 +229,21 @@
 	struct unicast_frag_packet *frag1, *frag2;
 	int uc_hdr_len = sizeof(struct unicast_packet);
 	int ucf_hdr_len = sizeof(struct unicast_frag_packet);
-	int data_len = skb->len;
+	int data_len = skb->len - uc_hdr_len;
+	int large_tail = 0;
+	uint16_t seqno;
 
 	if (!bat_priv->primary_if)
 		goto dropped;
 
-	unicast_packet = (struct unicast_packet *) skb->data;
-
-	memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
 	frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
-	skb_split(skb, frag_skb, data_len / 2);
+	if (!frag_skb)
+		goto dropped;
+	skb_reserve(frag_skb, ucf_hdr_len);
+
+	unicast_packet = (struct unicast_packet *) skb->data;
+	memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
+	skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
 
 	if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
 	    my_skb_head_push(frag_skb, ucf_hdr_len) < 0)
@@ -251,13 +261,15 @@
 	memcpy(frag1->orig, bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
 	memcpy(frag2, frag1, sizeof(struct unicast_frag_packet));
 
-	frag1->flags |= UNI_FRAG_HEAD;
-	frag2->flags &= ~UNI_FRAG_HEAD;
+	if (data_len & 1)
+		large_tail = UNI_FRAG_LARGETAIL;
 
-	frag1->seqno = htons((uint16_t)atomic_inc_return(
-			     &batman_if->frag_seqno));
-	frag2->seqno = htons((uint16_t)atomic_inc_return(
-			     &batman_if->frag_seqno));
+	frag1->flags = UNI_FRAG_HEAD | large_tail;
+	frag2->flags = large_tail;
+
+	seqno = atomic_add_return(2, &batman_if->frag_seqno);
+	frag1->seqno = htons(seqno - 1);
+	frag2->seqno = htons(seqno);
 
 	send_skb_packet(skb, batman_if, dstaddr);
 	send_skb_packet(frag_skb, batman_if, dstaddr);
@@ -274,7 +286,7 @@
 {
 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
 	struct unicast_packet *unicast_packet;
-	struct orig_node *orig_node;
+	struct orig_node *orig_node = NULL;
 	struct batman_if *batman_if;
 	struct neigh_node *router;
 	int data_len = skb->len;
@@ -285,11 +297,6 @@
 	/* get routing information */
 	if (is_multicast_ether_addr(ethhdr->h_dest))
 		orig_node = (struct orig_node *)gw_get_selected(bat_priv);
-	else
-		orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
-							   compare_orig,
-							   choose_orig,
-							   ethhdr->h_dest));
 
 	/* check for hna host */
 	if (!orig_node)
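
The fragmentation changes in this file now reserve both fragment sequence numbers with a single
atomic_add_return(2, &batman_if->frag_seqno), so the head and tail of one packet always carry
consecutive seqnos even when several packets are fragmented concurrently, and UNI_FRAG_LARGETAIL
records which half is the longer one when the payload length is odd. A minimal sketch of the
pairing idea (plain C11 atomics, not the driver code itself):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint16_t frag_seqno;

    static void assign_frag_seqnos(uint16_t *head, uint16_t *tail)
    {
            /* reserve two consecutive numbers in one atomic step so pairs
             * from concurrent senders can never interleave */
            uint16_t seq = (uint16_t)(atomic_fetch_add(&frag_seqno, 2) + 2);

            *head = seq - 1;
            *tail = seq;
    }
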
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index e32b786..8897308 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
  *
  * Andreas Langer
  *
@@ -22,6 +22,8 @@
 #ifndef _NET_BATMAN_ADV_UNICAST_H_
 #define _NET_BATMAN_ADV_UNICAST_H_
 
+#include "packet.h"
+
 #define FRAG_TIMEOUT 10000	/* purge frag list entrys after time in ms */
 #define FRAG_BUFFER_SIZE 6	/* number of list elements in buffer */
 
@@ -32,4 +34,25 @@
 int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
 		  struct batman_if *batman_if, uint8_t dstaddr[]);
 
+static inline int frag_can_reassemble(struct sk_buff *skb, int mtu)
+{
+	struct unicast_frag_packet *unicast_packet;
+	int uneven_correction = 0;
+	unsigned int merged_size;
+
+	unicast_packet = (struct unicast_frag_packet *)skb->data;
+
+	if (unicast_packet->flags & UNI_FRAG_LARGETAIL) {
+		if (unicast_packet->flags & UNI_FRAG_HEAD)
+			uneven_correction = 1;
+		else
+			uneven_correction = -1;
+	}
+
+	merged_size = (skb->len - sizeof(struct unicast_frag_packet)) * 2;
+	merged_size += sizeof(struct unicast_packet) + uneven_correction;
+
+	return merged_size <= mtu;
+}
+
 #endif /* _NET_BATMAN_ADV_UNICAST_H_ */
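
frag_can_reassemble() above decides, from a single received fragment, whether the merged unicast
packet would still fit the interface MTU: it doubles the fragment's payload, adds back the plain
unicast header, and applies a +/-1 correction when UNI_FRAG_LARGETAIL says the two halves differ
in length. A worked check of that arithmetic (plain userspace C; the header sizes are assumed
values for illustration, not the real struct sizes):

    #include <assert.h>

    #define U 10    /* assumed sizeof(struct unicast_packet)      */
    #define F 20    /* assumed sizeof(struct unicast_frag_packet) */

    int main(void)
    {
            int d = 1401;                   /* odd payload length behind the header */
            int head = F + d / 2;           /* head fragment: F + 700 bytes          */
            int tail = F + (d - d / 2);     /* tail fragment: F + 701 bytes          */

            /* seen from the head: tail carries one byte more -> correction +1 */
            assert((head - F) * 2 + U + 1 == d + U);
            /* seen from the tail: head carries one byte less -> correction -1 */
            assert((tail - F) * 2 + U - 1 == d + U);
            return 0;
    }

Either way the computed merged size equals the original packet length d + U, which is what gets
compared against the MTU.
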
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index cd4c423..7db9ad8 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
@@ -64,6 +64,7 @@
 
 	spin_unlock_bh(&bat_priv->vis_list_lock);
 	kfree_skb(info->skb_packet);
+	kfree(info);
 }
 
 /* Compare two vis packets, used by the hashing algorithm */
@@ -268,10 +269,10 @@
 				buff_pos += sprintf(buff + buff_pos, "%pM,",
 						entry->addr);
 
-				for (i = 0; i < packet->entries; i++)
+				for (j = 0; j < packet->entries; j++)
 					buff_pos += vis_data_read_entry(
 							buff + buff_pos,
-							&entries[i],
+							&entries[j],
 							entry->addr,
 							entry->primary);
 
@@ -444,7 +445,7 @@
 			      info);
 	if (hash_added < 0) {
 		/* did not work (for some reason) */
-		kref_put(&old_info->refcount, free_info);
+		kref_put(&info->refcount, free_info);
 		info = NULL;
 	}
 
@@ -815,7 +816,7 @@
 		container_of(work, struct delayed_work, work);
 	struct bat_priv *bat_priv =
 		container_of(delayed_work, struct bat_priv, vis_work);
-	struct vis_info *info, *temp;
+	struct vis_info *info;
 
 	spin_lock_bh(&bat_priv->vis_hash_lock);
 	purge_vis_packets(bat_priv);
@@ -825,8 +826,9 @@
 		send_list_add(bat_priv, bat_priv->my_vis_info);
 	}
 
-	list_for_each_entry_safe(info, temp, &bat_priv->vis_send_list,
-				 send_list) {
+	while (!list_empty(&bat_priv->vis_send_list)) {
+		info = list_first_entry(&bat_priv->vis_send_list,
+					typeof(*info), send_list);
 
 		kref_get(&info->refcount);
 		spin_unlock_bh(&bat_priv->vis_hash_lock);
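
The send loop above drops vis_hash_lock around the actual transmission, so list_for_each_entry_safe()
could be confused by entries added or removed while the lock was released; popping the first entry
with list_first_entry() re-reads the list head on every iteration and tolerates such concurrent
changes. The general shape of the pattern (a sketch with illustrative names, not batman-adv code):

    static void drain_send_list(spinlock_t *lock, struct list_head *send_list)
    {
            struct item *entry;             /* "struct item" is an illustrative type */

            spin_lock_bh(lock);
            while (!list_empty(send_list)) {
                    entry = list_first_entry(send_list, struct item, node);
                    take_ref(entry);        /* keep it alive without the lock */

                    spin_unlock_bh(lock);
                    do_send(entry);         /* slow path, may take other locks */
                    spin_lock_bh(lock);

                    list_del_init(&entry->node);
                    drop_ref(entry);
            }
            spin_unlock_bh(lock);
    }
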
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
index 2c3b330..31b820d 100644
--- a/net/batman-adv/vis.h
+++ b/net/batman-adv/vis.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
+ * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 5564435..1461b19 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -297,6 +297,21 @@
 
 #endif
 
+static int br_add_slave(struct net_device *dev, struct net_device *slave_dev)
+
+{
+	struct net_bridge *br = netdev_priv(dev);
+
+	return br_add_if(br, slave_dev);
+}
+
+static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+
+	return br_del_if(br, slave_dev);
+}
+
 static const struct ethtool_ops br_ethtool_ops = {
 	.get_drvinfo    = br_getinfo,
 	.get_link	= ethtool_op_get_link,
@@ -326,6 +341,8 @@
 	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
 	.ndo_poll_controller	 = br_poll_controller,
 #endif
+	.ndo_add_slave		 = br_add_slave,
+	.ndo_del_slave		 = br_del_slave,
 };
 
 static void br_dev_free(struct net_device *dev)
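
With ndo_add_slave/ndo_del_slave wired into br_netdev_ops, generic code can enslave or release a
bridge port without reaching into bridge-private symbols. A caller would dispatch roughly as below
(a hedged sketch; do_enslave() is an invented name, not an existing kernel function):

    static int do_enslave(struct net_device *master, struct net_device *slave)
    {
            const struct net_device_ops *ops = master->netdev_ops;

            if (!ops->ndo_add_slave)
                    return -EOPNOTSUPP;     /* device type does not support slaves */

            return ops->ndo_add_slave(master, slave);
    }
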
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 2872393..88485cc 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -328,12 +328,12 @@
 	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
 	if (fdb) {
 		memcpy(fdb->addr.addr, addr, ETH_ALEN);
-		hlist_add_head_rcu(&fdb->hlist, head);
-
 		fdb->dst = source;
 		fdb->is_local = is_local;
 		fdb->is_static = is_local;
 		fdb->ageing_timer = jiffies;
+
+		hlist_add_head_rcu(&fdb->hlist, head);
 	}
 	return fdb;
 }
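
Moving hlist_add_head_rcu() after the field assignments matters because the fdb hash is walked by
lockless RCU readers: an entry must be fully initialized before it is published, or a reader can
observe a half-filled entry. The generic shape of the publish-after-init pattern (illustrative
types, not bridge code):

    struct foo *f = kmem_cache_alloc(cache, GFP_ATOMIC);

    if (f) {
            f->key   = key;                         /* fill in every field first ... */
            f->state = FOO_INIT;
            hlist_add_head_rcu(&f->node, head);     /* ... then publish last         */
    }

hlist_add_head_rcu() carries the write barrier needed so that a reader who finds the node also
sees its initialized fields.
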
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index d9d1e2b..dce8f00 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -148,6 +148,8 @@
 
 	netdev_rx_handler_unregister(dev);
 
+	netdev_set_master(dev, NULL);
+
 	br_multicast_del_port(p);
 
 	kobject_uevent(&p->kobj, KOBJ_REMOVE);
@@ -365,7 +367,7 @@
 void br_features_recompute(struct net_bridge *br)
 {
 	struct net_bridge_port *p;
-	unsigned long features, mask;
+	u32 features, mask;
 
 	features = mask = br->feature_mask;
 	if (list_empty(&br->port_list))
@@ -379,7 +381,7 @@
 	}
 
 done:
-	br->dev->features = netdev_fix_features(features, NULL);
+	br->dev->features = netdev_fix_features(br->dev, features);
 }
 
 /* called with RTNL */
@@ -429,10 +431,14 @@
 	if (br_netpoll_info(br) && ((err = br_netpoll_enable(p))))
 		goto err3;
 
-	err = netdev_rx_handler_register(dev, br_handle_frame, p);
+	err = netdev_set_master(dev, br->dev);
 	if (err)
 		goto err3;
 
+	err = netdev_rx_handler_register(dev, br_handle_frame, p);
+	if (err)
+		goto err4;
+
 	dev->priv_flags |= IFF_BRIDGE_PORT;
 
 	dev_disable_lro(dev);
@@ -455,6 +461,9 @@
 	kobject_uevent(&p->kobj, KOBJ_ADD);
 
 	return 0;
+
+err4:
+	netdev_set_master(dev, NULL);
 err3:
 	sysfs_remove_link(br->ifobj, p->dev->name);
 err2:
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 6f6d8e1..88e4aa9 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -80,7 +80,7 @@
 	if (is_multicast_ether_addr(dest)) {
 		mdst = br_mdb_get(br, skb);
 		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
-			if ((mdst && !hlist_unhashed(&mdst->mglist)) ||
+			if ((mdst && mdst->mglist) ||
 			    br_multicast_is_router(br))
 				skb2 = skb;
 			br_multicast_forward(mdst, skb, skb2);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index f701a21..09d5c09 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -232,8 +232,7 @@
 	if (!netif_running(br->dev) || timer_pending(&mp->timer))
 		goto out;
 
-	if (!hlist_unhashed(&mp->mglist))
-		hlist_del_init(&mp->mglist);
+	mp->mglist = false;
 
 	if (mp->ports)
 		goto out;
@@ -276,7 +275,7 @@
 		del_timer(&p->query_timer);
 		call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
-		if (!mp->ports && hlist_unhashed(&mp->mglist) &&
+		if (!mp->ports && !mp->mglist &&
 		    netif_running(br->dev))
 			mod_timer(&mp->timer, jiffies);
 
@@ -528,7 +527,7 @@
 	struct net_bridge *br = mp->br;
 
 	spin_lock(&br->multicast_lock);
-	if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) ||
+	if (!netif_running(br->dev) || !mp->mglist ||
 	    mp->queries_sent >= br->multicast_last_member_count)
 		goto out;
 
@@ -719,7 +718,7 @@
 		goto err;
 
 	if (!port) {
-		hlist_add_head(&mp->mglist, &br->mglist);
+		mp->mglist = true;
 		mod_timer(&mp->timer, now + br->multicast_membership_interval);
 		goto out;
 	}
@@ -1165,7 +1164,7 @@
 
 	max_delay *= br->multicast_last_member_count;
 
-	if (!hlist_unhashed(&mp->mglist) &&
+	if (mp->mglist &&
 	    (timer_pending(&mp->timer) ?
 	     time_after(mp->timer.expires, now + max_delay) :
 	     try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1177,7 +1176,7 @@
 		if (timer_pending(&p->timer) ?
 		    time_after(p->timer.expires, now + max_delay) :
 		    try_to_del_timer_sync(&p->timer) >= 0)
-			mod_timer(&mp->timer, now + max_delay);
+			mod_timer(&p->timer, now + max_delay);
 	}
 
 out:
@@ -1236,7 +1235,7 @@
 		goto out;
 
 	max_delay *= br->multicast_last_member_count;
-	if (!hlist_unhashed(&mp->mglist) &&
+	if (mp->mglist &&
 	    (timer_pending(&mp->timer) ?
 	     time_after(mp->timer.expires, now + max_delay) :
 	     try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1248,7 +1247,7 @@
 		if (timer_pending(&p->timer) ?
 		    time_after(p->timer.expires, now + max_delay) :
 		    try_to_del_timer_sync(&p->timer) >= 0)
-			mod_timer(&mp->timer, now + max_delay);
+			mod_timer(&p->timer, now + max_delay);
 	}
 
 out:
@@ -1283,7 +1282,7 @@
 		     br->multicast_last_member_interval;
 
 	if (!port) {
-		if (!hlist_unhashed(&mp->mglist) &&
+		if (mp->mglist &&
 		    (timer_pending(&mp->timer) ?
 		     time_after(mp->timer.expires, time) :
 		     try_to_del_timer_sync(&mp->timer) >= 0)) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 84aac77..f7afc36 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -84,13 +84,13 @@
 struct net_bridge_mdb_entry
 {
 	struct hlist_node		hlist[2];
-	struct hlist_node		mglist;
 	struct net_bridge		*br;
 	struct net_bridge_port_group __rcu *ports;
 	struct rcu_head			rcu;
 	struct timer_list		timer;
 	struct timer_list		query_timer;
 	struct br_ip			addr;
+	bool				mglist;
 	u32				queries_sent;
 };
 
@@ -182,7 +182,7 @@
 	struct br_cpu_netstats __percpu *stats;
 	spinlock_t			hash_lock;
 	struct hlist_head		hash[BR_HASH_SIZE];
-	unsigned long			feature_mask;
+	u32				feature_mask;
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct rtable 			fake_rtable;
 	bool				nf_call_iptables;
@@ -238,7 +238,6 @@
 	spinlock_t			multicast_lock;
 	struct net_bridge_mdb_htable __rcu *mdb;
 	struct hlist_head		router_list;
-	struct hlist_head		mglist;
 
 	struct timer_list		multicast_router_timer;
 	struct timer_list		multicast_querier_timer;
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
index 50a46af..2ed0056 100644
--- a/net/bridge/netfilter/ebt_ip6.c
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -22,9 +22,15 @@
 #include <linux/netfilter_bridge/ebtables.h>
 #include <linux/netfilter_bridge/ebt_ip6.h>
 
-struct tcpudphdr {
-	__be16 src;
-	__be16 dst;
+union pkthdr {
+	struct {
+		__be16 src;
+		__be16 dst;
+	} tcpudphdr;
+	struct {
+		u8 type;
+		u8 code;
+	} icmphdr;
 };
 
 static bool
@@ -33,8 +39,8 @@
 	const struct ebt_ip6_info *info = par->matchinfo;
 	const struct ipv6hdr *ih6;
 	struct ipv6hdr _ip6h;
-	const struct tcpudphdr *pptr;
-	struct tcpudphdr _ports;
+	const union pkthdr *pptr;
+	union pkthdr _pkthdr;
 
 	ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
 	if (ih6 == NULL)
@@ -56,26 +62,34 @@
 			return false;
 		if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
 			return false;
-		if (!(info->bitmask & EBT_IP6_DPORT) &&
-		    !(info->bitmask & EBT_IP6_SPORT))
+		if (!(info->bitmask & ( EBT_IP6_DPORT |
+					EBT_IP6_SPORT | EBT_IP6_ICMP6)))
 			return true;
-		pptr = skb_header_pointer(skb, offset_ph, sizeof(_ports),
-					  &_ports);
+
+		/* min ICMPv6 header size is 4, so sizeof(_pkthdr) is ok. */
+		pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr),
+					  &_pkthdr);
 		if (pptr == NULL)
 			return false;
 		if (info->bitmask & EBT_IP6_DPORT) {
-			u32 dst = ntohs(pptr->dst);
+			u16 dst = ntohs(pptr->tcpudphdr.dst);
 			if (FWINV(dst < info->dport[0] ||
 				  dst > info->dport[1], EBT_IP6_DPORT))
 				return false;
 		}
 		if (info->bitmask & EBT_IP6_SPORT) {
-			u32 src = ntohs(pptr->src);
+			u16 src = ntohs(pptr->tcpudphdr.src);
 			if (FWINV(src < info->sport[0] ||
 				  src > info->sport[1], EBT_IP6_SPORT))
 			return false;
 		}
-		return true;
+		if ((info->bitmask & EBT_IP6_ICMP6) &&
+		     FWINV(pptr->icmphdr.type < info->icmpv6_type[0] ||
+			   pptr->icmphdr.type > info->icmpv6_type[1] ||
+			   pptr->icmphdr.code < info->icmpv6_code[0] ||
+			   pptr->icmphdr.code > info->icmpv6_code[1],
+							EBT_IP6_ICMP6))
+			return false;
 	}
 	return true;
 }
@@ -103,6 +117,14 @@
 		return -EINVAL;
 	if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1])
 		return -EINVAL;
+	if (info->bitmask & EBT_IP6_ICMP6) {
+		if ((info->invflags & EBT_IP6_PROTO) ||
+		     info->protocol != IPPROTO_ICMPV6)
+			return -EINVAL;
+		if (info->icmpv6_type[0] > info->icmpv6_type[1] ||
+		    info->icmpv6_code[0] > info->icmpv6_code[1])
+			return -EINVAL;
+	}
 	return 0;
 }
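
The new EBT_IP6_ICMP6 match compares the ICMPv6 type and code against inclusive [low, high]
ranges and, per the check routine above, is only valid together with protocol == IPPROTO_ICMPV6
and without EBT_IP6_PROTO inversion. An illustrative matchinfo for "ICMPv6 echo request, any code"
might be filled in like this (a sketch; userspace normally builds this through ebtables, and the
literal values are only an example):

    struct ebt_ip6_info info = {
            .protocol    = IPPROTO_ICMPV6,
            .bitmask     = EBT_IP6_PROTO | EBT_IP6_ICMP6,
            .icmpv6_type = { 128, 128 },    /* inclusive range: echo request only */
            .icmpv6_code = { 0, 255 },      /* any code */
    };
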
 
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 16df053..5f1825d 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1764,6 +1764,7 @@
 
 	newinfo->entries_size = size;
 
+	xt_compat_init_offsets(AF_INET, info->nentries);
 	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
 							entries, newinfo);
 }
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 21ede14..f1f98d9 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -23,10 +23,8 @@
 #include <asm/atomic.h>
 
 #define MAX_PHY_LAYERS 7
-#define PHY_NAME_LEN 20
 
 #define container_obj(layr) container_of(layr, struct cfcnfg, layer)
-#define RFM_FRAGMENT_SIZE 4030
 
 /* Information about CAIF physical interfaces held by Config Module in order
  * to manage physical interfaces
@@ -191,6 +189,7 @@
 	struct cflayer *servl = NULL;
 	struct cfcnfg_phyinfo *phyinfo = NULL;
 	u8 phyid = 0;
+
 	caif_assert(adap_layer != NULL);
 	channel_id = adap_layer->id;
 	if (adap_layer->dn == NULL || channel_id == 0) {
@@ -199,16 +198,16 @@
 		goto end;
 	}
 	servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id);
-	if (servl == NULL)
-		goto end;
-	layer_set_up(servl, NULL);
-	ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
 	if (servl == NULL) {
 		pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)",
 		       channel_id);
 		ret = -EINVAL;
 		goto end;
 	}
+	layer_set_up(servl, NULL);
+	ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
+	if (ret)
+		goto end;
 	caif_assert(channel_id == servl->id);
 	if (adap_layer->dn != NULL) {
 		phyid = cfsrvl_getphyid(adap_layer->dn);
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index d3ed264..27dab26 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -18,7 +18,6 @@
 #define DGM_CMD_BIT  0x80
 #define DGM_FLOW_OFF 0x81
 #define DGM_FLOW_ON  0x80
-#define DGM_CTRL_PKT_SIZE 1
 #define DGM_MTU 1500
 
 static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 9297f7d..8303fe3 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -25,7 +25,6 @@
 	spinlock_t sync;
 	bool usestx;
 };
-#define STXLEN(layr) (layr->usestx ? 1 : 0)
 
 static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index efad410..315c0d6 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -20,7 +20,7 @@
 #define UTIL_REMOTE_SHUTDOWN 0x82
 #define UTIL_FLOW_OFF 0x81
 #define UTIL_FLOW_ON  0x80
-#define UTIL_CTRL_PKT_SIZE 1
+
 static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
 
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 3b425b1..c3b1dec 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -17,7 +17,7 @@
 #define VEI_FLOW_OFF 0x81
 #define VEI_FLOW_ON  0x80
 #define VEI_SET_PIN  0x82
-#define VEI_CTRL_PKT_SIZE 1
+
 #define container_obj(layr) container_of(layr, struct cfsrvl, layer)
 
 static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt);
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index fa9dab3..6008d6d 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -394,9 +394,7 @@
 	priv->conn_req.sockaddr.u.dgm.connection_id = -1;
 	priv->flowenabled = false;
 
-	ASSERT_RTNL();
 	init_waitqueue_head(&priv->netmgmt_wq);
-	list_add(&priv->list_field, &chnl_net_list);
 }
 
 
@@ -453,6 +451,8 @@
 	ret = register_netdevice(dev);
 	if (ret)
 		pr_warn("device rtml registration failed\n");
+	else
+		list_add(&caifdev->list_field, &chnl_net_list);
 	return ret;
 }
 
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 9d5e8ac..092dc88 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1256,6 +1256,9 @@
 		struct sockaddr_can *addr =
 			(struct sockaddr_can *)msg->msg_name;
 
+		if (msg->msg_namelen < sizeof(*addr))
+			return -EINVAL;
+
 		if (addr->can_family != AF_CAN)
 			return -EINVAL;
 
diff --git a/net/can/raw.c b/net/can/raw.c
index e88f610..883e9d7 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -649,6 +649,9 @@
 		struct sockaddr_can *addr =
 			(struct sockaddr_can *)msg->msg_name;
 
+		if (msg->msg_namelen < sizeof(*addr))
+			return -EINVAL;
+
 		if (addr->can_family != AF_CAN)
 			return -EINVAL;
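
Both the bcm and raw sendmsg paths now refuse a msg_name shorter than struct sockaddr_can before
dereferencing it, so userspace must always pass the full structure. A minimal, self-contained
usage sketch for the raw-socket case:

    #include <sys/socket.h>
    #include <linux/can.h>
    #include <linux/can/raw.h>

    /* send one frame on the CAN interface with the given ifindex */
    int send_one_frame(int ifindex)
    {
            struct sockaddr_can addr = { .can_family  = AF_CAN,
                                         .can_ifindex = ifindex };
            struct can_frame frame   = { .can_id  = 0x123, .can_dlc = 2,
                                         .data    = { 0xde, 0xad } };
            int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

            if (s < 0)
                    return -1;
            /* passing less than sizeof(addr) as the address length is
             * now rejected with -EINVAL */
            return sendto(s, &frame, sizeof(frame), 0,
                          (struct sockaddr *)&addr, sizeof(addr));
    }
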
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 54277df..578415c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -132,6 +132,7 @@
 #include <trace/events/skb.h>
 #include <linux/pci.h>
 #include <linux/inetdevice.h>
+#include <linux/cpu_rmap.h>
 
 #include "net-sysfs.h"
 
@@ -749,7 +750,8 @@
  *	@ha: hardware address
  *
  *	Search for an interface by MAC address. Returns NULL if the device
- *	is not found or a pointer to the device. The caller must hold RCU
+ *	is not found or a pointer to the device.
+ *	The caller must hold RCU or RTNL.
  *	The returned device has not had its ref count increased
  *	and the caller must therefore be careful about locking
  *
@@ -1279,13 +1281,16 @@
 
 static int __dev_close(struct net_device *dev)
 {
+	int retval;
 	LIST_HEAD(single);
 
 	list_add(&dev->unreg_list, &single);
-	return __dev_close_many(&single);
+	retval = __dev_close_many(&single);
+	list_del(&single);
+	return retval;
 }
 
-int dev_close_many(struct list_head *head)
+static int dev_close_many(struct list_head *head)
 {
 	struct net_device *dev, *tmp;
 	LIST_HEAD(tmp_list);
@@ -1324,7 +1329,7 @@
 
 	list_add(&dev->unreg_list, &single);
 	dev_close_many(&single);
-
+	list_del(&single);
 	return 0;
 }
 EXPORT_SYMBOL(dev_close);
@@ -1593,6 +1598,48 @@
 	rcu_read_unlock();
 }
 
+/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
+ * @dev: Network device
+ * @txq: number of queues available
+ *
+ * If real_num_tx_queues is changed the tc mappings may no longer be
+ * valid. To resolve this, verify that each tc mapping remains valid and,
+ * if not, null the mapping. With no priorities mapping to an
+ * offset/count pair, that pair will simply no longer be used. In the
+ * worst case, where TC0 itself is invalid, nothing can be done, so
+ * priority mappings are disabled entirely. It is expected that drivers
+ * will fix this mapping, if they can, before calling
+ * netif_set_real_num_tx_queues.
+ */
+static void netif_setup_tc(struct net_device *dev, unsigned int txq)
+{
+	int i;
+	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
+
+	/* If TC0 is invalidated disable TC mapping */
+	if (tc->offset + tc->count > txq) {
+		pr_warning("Number of in use tx queues changed "
+			   "invalidating tc mappings. Priority "
+			   "traffic classification disabled!\n");
+		dev->num_tc = 0;
+		return;
+	}
+
+	/* Invalidated prio to tc mappings set to TC0 */
+	for (i = 1; i < TC_BITMASK + 1; i++) {
+		int q = netdev_get_prio_tc_map(dev, i);
+
+		tc = &dev->tc_to_txq[q];
+		if (tc->offset + tc->count > txq) {
+			pr_warning("Number of in use tx queues "
+				   "changed. Priority %i to tc "
+				   "mapping %i is no longer valid "
+				   "setting map to 0\n",
+				   i, q);
+			netdev_set_prio_tc_map(dev, i, 0);
+		}
+	}
+}
+
 /*
  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
  * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
@@ -1604,7 +1651,8 @@
 	if (txq < 1 || txq > dev->num_tx_queues)
 		return -EINVAL;
 
-	if (dev->reg_state == NETREG_REGISTERED) {
+	if (dev->reg_state == NETREG_REGISTERED ||
+	    dev->reg_state == NETREG_UNREGISTERING) {
 		ASSERT_RTNL();
 
 		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
@@ -1612,6 +1660,9 @@
 		if (rc)
 			return rc;
 
+		if (dev->num_tc)
+			netif_setup_tc(dev, txq);
+
 		if (txq < dev->real_num_tx_queues)
 			qdisc_reset_all_tx_gt(dev, txq);
 	}
@@ -1811,7 +1862,7 @@
  *	It may return NULL if the skb requires no segmentation.  This is
  *	only possible when GSO is used for verifying header integrity.
  */
-struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
+struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features)
 {
 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
 	struct packet_type *ptype;
@@ -1999,9 +2050,9 @@
 		 protocol == htons(ETH_P_FCOE)));
 }
 
-static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
 {
-	if (!can_checksum_protocol(protocol, features)) {
+	if (!can_checksum_protocol(features, protocol)) {
 		features &= ~NETIF_F_ALL_CSUM;
 		features &= ~NETIF_F_SG;
 	} else if (illegal_highdma(skb->dev, skb)) {
@@ -2011,10 +2062,10 @@
 	return features;
 }
 
-int netif_skb_features(struct sk_buff *skb)
+u32 netif_skb_features(struct sk_buff *skb)
 {
 	__be16 protocol = skb->protocol;
-	int features = skb->dev->features;
+	u32 features = skb->dev->features;
 
 	if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
@@ -2023,13 +2074,13 @@
 		return harmonize_features(skb, protocol, features);
 	}
 
-	features &= skb->dev->vlan_features;
+	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
 
 	if (protocol != htons(ETH_P_8021Q)) {
 		return harmonize_features(skb, protocol, features);
 	} else {
 		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
-				NETIF_F_GEN_CSUM;
+				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
 		return harmonize_features(skb, protocol, features);
 	}
 }
@@ -2059,7 +2110,7 @@
 	int rc = NETDEV_TX_OK;
 
 	if (likely(!skb->next)) {
-		int features;
+		u32 features;
 
 		/*
 		 * If device doesnt need skb->dst, release it right now while
@@ -2161,6 +2212,8 @@
 		  unsigned int num_tx_queues)
 {
 	u32 hash;
+	u16 qoffset = 0;
+	u16 qcount = num_tx_queues;
 
 	if (skb_rx_queue_recorded(skb)) {
 		hash = skb_get_rx_queue(skb);
@@ -2169,13 +2222,19 @@
 		return hash;
 	}
 
+	if (dev->num_tc) {
+		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+		qoffset = dev->tc_to_txq[tc].offset;
+		qcount = dev->tc_to_txq[tc].count;
+	}
+
 	if (skb->sk && skb->sk->sk_hash)
 		hash = skb->sk->sk_hash;
 	else
 		hash = (__force u16) skb->protocol ^ skb->rxhash;
 	hash = jhash_1word(hash, hashrnd);
 
-	return (u16) (((u64) hash * num_tx_queues) >> 32);
+	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
 }
 EXPORT_SYMBOL(__skb_tx_hash);
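
With dev->num_tc set, __skb_tx_hash() no longer spreads the flow hash over all real tx queues but
over the [offset, offset + count) slice owned by the traffic class that the skb's priority maps
to. A worked example with assumed numbers (a priority mapped to a tc that owns queues 8..15):

    u32 hash    = 0x9e3779b9;       /* whatever jhash_1word() produced */
    u16 qcount  = 8;                /* tc_to_txq[tc].count  (assumed)  */
    u16 qoffset = 8;                /* tc_to_txq[tc].offset (assumed)  */

    u16 queue = (u16)(((u64)hash * qcount) >> 32) + qoffset;
    /* ((u64)0x9e3779b9 * 8) >> 32 == 4, so queue == 12, i.e. always a
     * queue inside the class's range 8..15 */
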
 
@@ -2272,15 +2331,18 @@
 				 struct netdev_queue *txq)
 {
 	spinlock_t *root_lock = qdisc_lock(q);
-	bool contended = qdisc_is_running(q);
+	bool contended;
 	int rc;
 
+	qdisc_skb_cb(skb)->pkt_len = skb->len;
+	qdisc_calculate_pkt_len(skb, q);
 	/*
 	 * Heuristic to force contended enqueues to serialize on a
 	 * separate lock before trying to get qdisc main lock.
 	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
 	 * and dequeue packets faster.
 	 */
+	contended = qdisc_is_running(q);
 	if (unlikely(contended))
 		spin_lock(&q->busylock);
 
@@ -2298,7 +2360,6 @@
 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
 			skb_dst_force(skb);
 
-		qdisc_skb_cb(skb)->pkt_len = skb->len;
 		qdisc_bstats_update(q, skb);
 
 		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
@@ -2313,7 +2374,7 @@
 		rc = NET_XMIT_SUCCESS;
 	} else {
 		skb_dst_force(skb);
-		rc = qdisc_enqueue_root(skb, q);
+		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
 		if (qdisc_run_begin(q)) {
 			if (unlikely(contended)) {
 				spin_unlock(&q->busylock);
@@ -2532,6 +2593,54 @@
 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
+static struct rps_dev_flow *
+set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+	    struct rps_dev_flow *rflow, u16 next_cpu)
+{
+	u16 tcpu;
+
+	tcpu = rflow->cpu = next_cpu;
+	if (tcpu != RPS_NO_CPU) {
+#ifdef CONFIG_RFS_ACCEL
+		struct netdev_rx_queue *rxqueue;
+		struct rps_dev_flow_table *flow_table;
+		struct rps_dev_flow *old_rflow;
+		u32 flow_id;
+		u16 rxq_index;
+		int rc;
+
+		/* Should we steer this flow to a different hardware queue? */
+		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
+		    !(dev->features & NETIF_F_NTUPLE))
+			goto out;
+		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
+		if (rxq_index == skb_get_rx_queue(skb))
+			goto out;
+
+		rxqueue = dev->_rx + rxq_index;
+		flow_table = rcu_dereference(rxqueue->rps_flow_table);
+		if (!flow_table)
+			goto out;
+		flow_id = skb->rxhash & flow_table->mask;
+		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
+							rxq_index, flow_id);
+		if (rc < 0)
+			goto out;
+		old_rflow = rflow;
+		rflow = &flow_table->flows[flow_id];
+		rflow->cpu = next_cpu;
+		rflow->filter = rc;
+		if (old_rflow->filter == rflow->filter)
+			old_rflow->filter = RPS_NO_FILTER;
+	out:
+#endif
+		rflow->last_qtail =
+			per_cpu(softnet_data, tcpu).input_queue_head;
+	}
+
+	return rflow;
+}
+
 /*
  * get_rps_cpu is called from netif_receive_skb and returns the target
  * CPU from the RPS map of the receiving queue for a given skb.
@@ -2562,7 +2671,8 @@
 
 	map = rcu_dereference(rxqueue->rps_map);
 	if (map) {
-		if (map->len == 1) {
+		if (map->len == 1 &&
+		    !rcu_dereference_raw(rxqueue->rps_flow_table)) {
 			tcpu = map->cpus[0];
 			if (cpu_online(tcpu))
 				cpu = tcpu;
@@ -2602,12 +2712,9 @@
 		if (unlikely(tcpu != next_cpu) &&
 		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
 		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
-		      rflow->last_qtail)) >= 0)) {
-			tcpu = rflow->cpu = next_cpu;
-			if (tcpu != RPS_NO_CPU)
-				rflow->last_qtail = per_cpu(softnet_data,
-				    tcpu).input_queue_head;
-		}
+		      rflow->last_qtail)) >= 0))
+			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
+
 		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
 			*rflowp = rflow;
 			cpu = tcpu;
@@ -2628,6 +2735,46 @@
 	return cpu;
 }
 
+#ifdef CONFIG_RFS_ACCEL
+
+/**
+ * rps_may_expire_flow - check whether an RFS hardware filter may be removed
+ * @dev: Device on which the filter was set
+ * @rxq_index: RX queue index
+ * @flow_id: Flow ID passed to ndo_rx_flow_steer()
+ * @filter_id: Filter ID returned by ndo_rx_flow_steer()
+ *
+ * Drivers that implement ndo_rx_flow_steer() should periodically call
+ * this function for each installed filter and remove the filters for
+ * which it returns %true.
+ */
+bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
+			 u32 flow_id, u16 filter_id)
+{
+	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
+	struct rps_dev_flow_table *flow_table;
+	struct rps_dev_flow *rflow;
+	bool expire = true;
+	int cpu;
+
+	rcu_read_lock();
+	flow_table = rcu_dereference(rxqueue->rps_flow_table);
+	if (flow_table && flow_id <= flow_table->mask) {
+		rflow = &flow_table->flows[flow_id];
+		cpu = ACCESS_ONCE(rflow->cpu);
+		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
+		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
+			   rflow->last_qtail) <
+		     (int)(10 * flow_table->mask)))
+			expire = false;
+	}
+	rcu_read_unlock();
+	return expire;
+}
+EXPORT_SYMBOL(rps_may_expire_flow);
+
+#endif /* CONFIG_RFS_ACCEL */
+
 /* Called from hardirq (IPI) context */
 static void rps_trigger_softirq(void *data)
 {
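
rps_may_expire_flow() is the driver-facing half of accelerated RFS: a driver that installs
hardware filters from ndo_rx_flow_steer() is expected to scan them periodically and tear down the
ones the core no longer needs. A rough driver-side sketch (the my_* structures and helpers are
placeholders, not a real API):

    struct my_filter {              /* hypothetical driver bookkeeping */
            bool installed;
            u16  rxq_index;
            u32  flow_id;
            u16  filter_id;
    };

    static void my_expire_rfs_filters(struct net_device *dev,
                                      struct my_filter *filters, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    struct my_filter *f = &filters[i];

                    if (!f->installed)
                            continue;
                    if (rps_may_expire_flow(dev, f->rxq_index,
                                            f->flow_id, f->filter_id)) {
                            my_remove_hw_filter(dev, f);  /* drop the HW filter */
                            f->installed = false;
                    }
            }
    }
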
@@ -2963,7 +3110,8 @@
  * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
  * ARP on active-backup slaves with arp_validate enabled.
  */
-int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
+static int __skb_bond_should_drop(struct sk_buff *skb,
+				  struct net_device *master)
 {
 	struct net_device *dev = skb->dev;
 
@@ -2997,14 +3145,12 @@
 	}
 	return 0;
 }
-EXPORT_SYMBOL(__skb_bond_should_drop);
 
 static int __netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
 	rx_handler_func_t *rx_handler;
 	struct net_device *orig_dev;
-	struct net_device *master;
 	struct net_device *null_or_orig;
 	struct net_device *orig_or_bond;
 	int ret = NET_RX_DROP;
@@ -3031,15 +3177,19 @@
 	 */
 	null_or_orig = NULL;
 	orig_dev = skb->dev;
-	master = ACCESS_ONCE(orig_dev->master);
 	if (skb->deliver_no_wcard)
 		null_or_orig = orig_dev;
-	else if (master) {
-		if (skb_bond_should_drop(skb, master)) {
-			skb->deliver_no_wcard = 1;
-			null_or_orig = orig_dev; /* deliver only exact match */
-		} else
-			skb->dev = master;
+	else if (netif_is_bond_slave(orig_dev)) {
+		struct net_device *bond_master = ACCESS_ONCE(orig_dev->master);
+
+		if (likely(bond_master)) {
+			if (__skb_bond_should_drop(skb, bond_master)) {
+				skb->deliver_no_wcard = 1;
+				/* deliver only exact match */
+				null_or_orig = orig_dev;
+			} else
+				skb->dev = bond_master;
+		}
 	}
 
 	__this_cpu_inc(softnet_data.processed);
@@ -3423,6 +3573,8 @@
 	__skb_pull(skb, skb_headlen(skb));
 	skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
 	skb->vlan_tci = 0;
+	skb->dev = napi->dev;
+	skb->skb_iif = 0;
 
 	napi->skb = skb;
 }
@@ -3910,12 +4062,15 @@
 
 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct net_device *dev = (v == SEQ_START_TOKEN) ?
-				  first_net_device(seq_file_net(seq)) :
-				  next_net_device((struct net_device *)v);
+	struct net_device *dev = v;
+
+	if (v == SEQ_START_TOKEN)
+		dev = first_net_device_rcu(seq_file_net(seq));
+	else
+		dev = next_net_device_rcu(dev);
 
 	++*pos;
-	return rcu_dereference(dev);
+	return dev;
 }
 
 void dev_seq_stop(struct seq_file *seq, void *v)
@@ -4199,15 +4354,14 @@
 
 
 /**
- *	netdev_set_master	-	set up master/slave pair
+ *	netdev_set_master	-	set up master pointer
  *	@slave: slave device
  *	@master: new master device
  *
  *	Changes the master device of the slave. Pass %NULL to break the
  *	bonding. The caller must hold the RTNL semaphore. On a failure
  *	a negative errno code is returned. On success the reference counts
- *	are adjusted, %RTM_NEWLINK is sent to the routing socket and the
- *	function returns zero.
+ *	are adjusted and the function returns zero.
  */
 int netdev_set_master(struct net_device *slave, struct net_device *master)
 {
@@ -4227,6 +4381,29 @@
 		synchronize_net();
 		dev_put(old);
 	}
+	return 0;
+}
+EXPORT_SYMBOL(netdev_set_master);
+
+/**
+ *	netdev_set_bond_master	-	set up bonding master/slave pair
+ *	@slave: slave device
+ *	@master: new master device
+ *
+ *	Changes the master device of the slave. Pass %NULL to break the
+ *	bonding. The caller must hold the RTNL semaphore. On a failure
+ *	a negative errno code is returned. On success %RTM_NEWLINK is sent
+ *	to the routing socket and the function returns zero.
+ */
+int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
+{
+	int err;
+
+	ASSERT_RTNL();
+
+	err = netdev_set_master(slave, master);
+	if (err)
+		return err;
 	if (master)
 		slave->flags |= IFF_SLAVE;
 	else
@@ -4235,7 +4412,7 @@
 	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
 	return 0;
 }
-EXPORT_SYMBOL(netdev_set_master);
+EXPORT_SYMBOL(netdev_set_bond_master);
 
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
@@ -4572,6 +4749,17 @@
 EXPORT_SYMBOL(dev_set_mtu);
 
 /**
+ *	dev_set_group - Change group this device belongs to
+ *	@dev: device
+ *	@new_group: group this device should belong to
+ */
+void dev_set_group(struct net_device *dev, int new_group)
+{
+	dev->group = new_group;
+}
+EXPORT_SYMBOL(dev_set_group);
+
+/**
  *	dev_set_mac_address - Change Media Access Control Address
  *	@dev: device
  *	@sa: new address
@@ -5059,43 +5247,58 @@
 
 	list_add(&dev->unreg_list, &single);
 	rollback_registered_many(&single);
+	list_del(&single);
 }
 
-unsigned long netdev_fix_features(unsigned long features, const char *name)
+u32 netdev_fix_features(struct net_device *dev, u32 features)
 {
+	/* Fix illegal checksum combinations */
+	if ((features & NETIF_F_HW_CSUM) &&
+	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+		netdev_info(dev, "mixed HW and IP checksum settings.\n");
+		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
+	}
+
+	if ((features & NETIF_F_NO_CSUM) &&
+	    (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+		netdev_info(dev, "mixed no checksumming and other settings.\n");
+		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
+	}
+
 	/* Fix illegal SG+CSUM combinations. */
 	if ((features & NETIF_F_SG) &&
 	    !(features & NETIF_F_ALL_CSUM)) {
-		if (name)
-			printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
-			       "checksum feature.\n", name);
+		netdev_info(dev,
+			    "Dropping NETIF_F_SG since no checksum feature.\n");
 		features &= ~NETIF_F_SG;
 	}
 
 	/* TSO requires that SG is present as well. */
 	if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
-		if (name)
-			printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
-			       "SG feature.\n", name);
+		netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n");
 		features &= ~NETIF_F_TSO;
 	}
 
+	/* Software GSO depends on SG. */
+	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
+		netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
+		features &= ~NETIF_F_GSO;
+	}
+
+	/* UFO needs SG and checksumming */
 	if (features & NETIF_F_UFO) {
 		/* maybe split UFO into V4 and V6? */
 		if (!((features & NETIF_F_GEN_CSUM) ||
 		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
 			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-			if (name)
-				printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
-				       "since no checksum offload features.\n",
-				       name);
+			netdev_info(dev,
+				"Dropping NETIF_F_UFO since no checksum offload features.\n");
 			features &= ~NETIF_F_UFO;
 		}
 
 		if (!(features & NETIF_F_SG)) {
-			if (name)
-				printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
-				       "since no NETIF_F_SG feature.\n", name);
+			netdev_info(dev,
+				"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
 			features &= ~NETIF_F_UFO;
 		}
 	}
@@ -5104,6 +5307,37 @@
 }
 EXPORT_SYMBOL(netdev_fix_features);
 
+void netdev_update_features(struct net_device *dev)
+{
+	u32 features;
+	int err = 0;
+
+	features = netdev_get_wanted_features(dev);
+
+	if (dev->netdev_ops->ndo_fix_features)
+		features = dev->netdev_ops->ndo_fix_features(dev, features);
+
+	/* driver might be less strict about feature dependencies */
+	features = netdev_fix_features(dev, features);
+
+	if (dev->features == features)
+		return;
+
+	netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n",
+		dev->features, features);
+
+	if (dev->netdev_ops->ndo_set_features)
+		err = dev->netdev_ops->ndo_set_features(dev, features);
+
+	if (!err)
+		dev->features = features;
+	else if (err < 0)
+		netdev_err(dev,
+			"set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
+			err, features, dev->features);
+}
+EXPORT_SYMBOL(netdev_update_features);
+
 /**
  *	netif_stacked_transfer_operstate -	transfer operstate
  *	@rootdev: the root or lower level device to transfer state from
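
netdev_update_features() computes the wanted feature set, lets the driver trim it via
ndo_fix_features, applies the generic netdev_fix_features() dependency checks, and only commits
the result to dev->features once ndo_set_features() succeeds. A hedged sketch of the two driver
hooks (the mynic_* names are invented for illustration):

    static u32 mynic_fix_features(struct net_device *dev, u32 features)
    {
            /* assumed hardware limitation: scatter-gather only works
             * together with some form of tx checksum offload */
            if (!(features & NETIF_F_ALL_CSUM))
                    features &= ~NETIF_F_SG;
            return features;
    }

    static int mynic_set_features(struct net_device *dev, u32 features)
    {
            u32 changed = dev->features ^ features;

            if (changed & NETIF_F_TSO)
                    mynic_configure_tso(dev, !!(features & NETIF_F_TSO));
            return 0;       /* the core updates dev->features on success */
    }
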
@@ -5238,26 +5472,18 @@
 	if (dev->iflink == -1)
 		dev->iflink = dev->ifindex;
 
-	/* Fix illegal checksum combinations */
-	if ((dev->features & NETIF_F_HW_CSUM) &&
-	    (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-		printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
-		       dev->name);
-		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
-	}
+	/* Transfer changeable features to wanted_features and enable
+	 * software offloads (GSO and GRO).
+	 */
+	dev->hw_features |= NETIF_F_SOFT_FEATURES;
+	dev->wanted_features = (dev->features & dev->hw_features)
+		| NETIF_F_SOFT_FEATURES;
 
-	if ((dev->features & NETIF_F_NO_CSUM) &&
-	    (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-		printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
-		       dev->name);
-		dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
-	}
+	/* Avoid warning from netdev_fix_features() for GSO without SG */
+	if (!(dev->wanted_features & NETIF_F_SG))
+		dev->wanted_features &= ~NETIF_F_GSO;
 
-	dev->features = netdev_fix_features(dev->features, dev->name);
-
-	/* Enable software GSO if SG is supported. */
-	if (dev->features & NETIF_F_SG)
-		dev->features |= NETIF_F_GSO;
+	netdev_update_features(dev);
 
 	/* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
 	 * vlan_dev_init() will do the dev->features check, so these features
@@ -5656,18 +5882,6 @@
 
 	dev_net_set(dev, &init_net);
 
-	dev->num_tx_queues = txqs;
-	dev->real_num_tx_queues = txqs;
-	if (netif_alloc_netdev_queues(dev))
-		goto free_pcpu;
-
-#ifdef CONFIG_RPS
-	dev->num_rx_queues = rxqs;
-	dev->real_num_rx_queues = rxqs;
-	if (netif_alloc_rx_queues(dev))
-		goto free_pcpu;
-#endif
-
 	dev->gso_max_size = GSO_MAX_SIZE;
 
 	INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
@@ -5677,9 +5891,27 @@
 	INIT_LIST_HEAD(&dev->link_watch_list);
 	dev->priv_flags = IFF_XMIT_DST_RELEASE;
 	setup(dev);
+
+	dev->num_tx_queues = txqs;
+	dev->real_num_tx_queues = txqs;
+	if (netif_alloc_netdev_queues(dev))
+		goto free_all;
+
+#ifdef CONFIG_RPS
+	dev->num_rx_queues = rxqs;
+	dev->real_num_rx_queues = rxqs;
+	if (netif_alloc_rx_queues(dev))
+		goto free_all;
+#endif
+
 	strcpy(dev->name, name);
+	dev->group = INIT_NETDEV_GROUP;
 	return dev;
 
+free_all:
+	free_netdev(dev);
+	return NULL;
+
 free_pcpu:
 	free_percpu(dev->pcpu_refcnt);
 	kfree(dev->_tx);
@@ -5988,8 +6220,7 @@
  *	@one to the master device with current feature set @all.  Will not
  *	enable anything that is off in @mask. Returns the new feature set.
  */
-unsigned long netdev_increment_features(unsigned long all, unsigned long one,
-					unsigned long mask)
+u32 netdev_increment_features(u32 all, u32 one, u32 mask)
 {
 	/* If device needs checksumming, downgrade to it. */
 	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
@@ -6207,6 +6438,7 @@
 		}
 	}
 	unregister_netdevice_many(&dev_kill_list);
+	list_del(&dev_kill_list);
 	rtnl_unlock();
 }
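
The hunks above add netdev_update_features(): dev->wanted_features is masked by dev->hw_features, optionally adjusted by the driver's ndo_fix_features() callback, run through the generic netdev_fix_features() checks, and finally committed to hardware via ndo_set_features(). A minimal sketch of how a converted driver might wire up these hooks follows; the mydev_* names and the register-write helper are hypothetical, only the ndo_* signatures and NETIF_F_* bits come from the patch itself.

	/* Hedged sketch of a driver using the new feature hooks.
	 * mydev_write_csum_enable() is a made-up hardware helper. */
	#include <linux/netdevice.h>

	static u32 mydev_fix_features(struct net_device *dev, u32 features)
	{
		/* Example dependency: this hardware cannot checksum without SG. */
		if (!(features & NETIF_F_SG))
			features &= ~NETIF_F_IP_CSUM;
		return features;
	}

	static int mydev_set_features(struct net_device *dev, u32 features)
	{
		u32 changed = dev->features ^ features;

		if (changed & NETIF_F_IP_CSUM)
			mydev_write_csum_enable(dev, !!(features & NETIF_F_IP_CSUM));
		return 0;	/* 0 lets the core commit dev->features = features */
	}

	static const struct net_device_ops mydev_netdev_ops = {
		.ndo_fix_features	= mydev_fix_features,
		.ndo_set_features	= mydev_set_features,
	};

	/* At probe time the driver would advertise what may be toggled, e.g.
	 *	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
	 *	dev->features    = dev->hw_features;
	 * registration then ORs in NETIF_F_SOFT_FEATURES and calls
	 * netdev_update_features(), as shown in the register_netdevice() hunk. */
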
 
diff --git a/net/core/dst.c b/net/core/dst.c
index b99c7c7..91104d3 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -164,7 +164,9 @@
 }
 EXPORT_SYMBOL(dst_discard);
 
-void *dst_alloc(struct dst_ops *ops)
+const u32 dst_default_metrics[RTAX_MAX];
+
+void *dst_alloc(struct dst_ops *ops, int initial_ref)
 {
 	struct dst_entry *dst;
 
@@ -175,11 +177,12 @@
 	dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
 	if (!dst)
 		return NULL;
-	atomic_set(&dst->__refcnt, 0);
+	atomic_set(&dst->__refcnt, initial_ref);
 	dst->ops = ops;
 	dst->lastuse = jiffies;
 	dst->path = dst;
 	dst->input = dst->output = dst_discard;
+	dst_init_metrics(dst, dst_default_metrics, true);
 #if RT_CACHE_DEBUG >= 2
 	atomic_inc(&dst_total);
 #endif
@@ -282,6 +285,42 @@
 }
 EXPORT_SYMBOL(dst_release);
 
+u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
+{
+	u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+
+	if (p) {
+		u32 *old_p = __DST_METRICS_PTR(old);
+		unsigned long prev, new;
+
+		memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+		new = (unsigned long) p;
+		prev = cmpxchg(&dst->_metrics, old, new);
+
+		if (prev != old) {
+			kfree(p);
+			p = __DST_METRICS_PTR(prev);
+			if (prev & DST_METRICS_READ_ONLY)
+				p = NULL;
+		}
+	}
+	return p;
+}
+EXPORT_SYMBOL(dst_cow_metrics_generic);
+
+/* Caller asserts that dst_metrics_read_only(dst) is false.  */
+void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
+{
+	unsigned long prev, new;
+
+	new = (unsigned long) dst_default_metrics;
+	prev = cmpxchg(&dst->_metrics, old, new);
+	if (prev == old)
+		kfree(__DST_METRICS_PTR(old));
+}
+EXPORT_SYMBOL(__dst_destroy_metrics_generic);
+
 /**
  * skb_dst_set_noref - sets skb dst, without a reference
  * @skb: buffer
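
dst_alloc() now starts every entry on the shared, read-only dst_default_metrics array, and dst_cow_metrics_generic() clones the metrics on first write, swinging dst->_metrics over with cmpxchg() and discarding the copy if another CPU won the race. Below is a stand-alone user-space illustration of that lock-free copy-on-write idiom, not kernel code; the names and the __sync_val_compare_and_swap() builtin are choices made for the demo (the kernel additionally tags the pointer with a DST_METRICS_READ_ONLY bit).

	/* Illustration only: COW of a shared default array, resolved by CAS. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <stdint.h>

	#define N_METRICS 16

	static const uint32_t default_metrics[N_METRICS];	/* shared, read-only */
	static const uint32_t *metrics = default_metrics;	/* plays dst->_metrics */

	static uint32_t *metrics_writable(void)
	{
		const uint32_t *old = metrics;
		const uint32_t *prev;
		uint32_t *p;

		if (old != default_metrics)
			return (uint32_t *)old;		/* already privately owned */

		p = malloc(sizeof(uint32_t) * N_METRICS);
		if (!p)
			return NULL;
		memcpy(p, old, sizeof(uint32_t) * N_METRICS);

		/* Only one writer wins the swap; the loser frees its copy. */
		prev = __sync_val_compare_and_swap(&metrics, old, p);
		if (prev != old) {
			free(p);
			p = (uint32_t *)prev;
		}
		return p;
	}

	int main(void)
	{
		uint32_t *m = metrics_writable();

		m[3] = 1500;	/* e.g. an MTU-like metric */
		printf("metric[3] = %u\n", m[3]);
		return 0;
	}
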
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 1774178..66cdc76 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -34,12 +34,6 @@
 }
 EXPORT_SYMBOL(ethtool_op_get_link);
 
-u32 ethtool_op_get_rx_csum(struct net_device *dev)
-{
-	return (dev->features & NETIF_F_ALL_CSUM) != 0;
-}
-EXPORT_SYMBOL(ethtool_op_get_rx_csum);
-
 u32 ethtool_op_get_tx_csum(struct net_device *dev)
 {
 	return (dev->features & NETIF_F_ALL_CSUM) != 0;
@@ -55,6 +49,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(ethtool_op_set_tx_csum);
 
 int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
 {
@@ -171,6 +166,306 @@
 
 /* Handlers for each ethtool command */
 
+#define ETHTOOL_DEV_FEATURE_WORDS	1
+
+static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
+{
+	struct ethtool_gfeatures cmd = {
+		.cmd = ETHTOOL_GFEATURES,
+		.size = ETHTOOL_DEV_FEATURE_WORDS,
+	};
+	struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS] = {
+		{
+			.available = dev->hw_features,
+			.requested = dev->wanted_features,
+			.active = dev->features,
+			.never_changed = NETIF_F_NEVER_CHANGE,
+		},
+	};
+	u32 __user *sizeaddr;
+	u32 copy_size;
+
+	sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size);
+	if (get_user(copy_size, sizeaddr))
+		return -EFAULT;
+
+	if (copy_size > ETHTOOL_DEV_FEATURE_WORDS)
+		copy_size = ETHTOOL_DEV_FEATURE_WORDS;
+
+	if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
+		return -EFAULT;
+	useraddr += sizeof(cmd);
+	if (copy_to_user(useraddr, features, copy_size * sizeof(*features)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
+{
+	struct ethtool_sfeatures cmd;
+	struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
+	int ret = 0;
+
+	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+		return -EFAULT;
+	useraddr += sizeof(cmd);
+
+	if (cmd.size != ETHTOOL_DEV_FEATURE_WORDS)
+		return -EINVAL;
+
+	if (copy_from_user(features, useraddr, sizeof(features)))
+		return -EFAULT;
+
+	if (features[0].valid & ~NETIF_F_ETHTOOL_BITS)
+		return -EINVAL;
+
+	if (features[0].valid & ~dev->hw_features) {
+		features[0].valid &= dev->hw_features;
+		ret |= ETHTOOL_F_UNSUPPORTED;
+	}
+
+	dev->wanted_features &= ~features[0].valid;
+	dev->wanted_features |= features[0].valid & features[0].requested;
+	netdev_update_features(dev);
+
+	if ((dev->wanted_features ^ dev->features) & features[0].valid)
+		ret |= ETHTOOL_F_WISH;
+
+	return ret;
+}
+
+static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GSTRING_LEN] = {
+	/* NETIF_F_SG */              "tx-scatter-gather",
+	/* NETIF_F_IP_CSUM */         "tx-checksum-ipv4",
+	/* NETIF_F_NO_CSUM */         "tx-checksum-unneeded",
+	/* NETIF_F_HW_CSUM */         "tx-checksum-ip-generic",
+	/* NETIF_F_IPV6_CSUM */       "tx-checksum-ipv6",
+	/* NETIF_F_HIGHDMA */         "highdma",
+	/* NETIF_F_FRAGLIST */        "tx-scatter-gather-fraglist",
+	/* NETIF_F_HW_VLAN_TX */      "tx-vlan-hw-insert",
+
+	/* NETIF_F_HW_VLAN_RX */      "rx-vlan-hw-parse",
+	/* NETIF_F_HW_VLAN_FILTER */  "rx-vlan-filter",
+	/* NETIF_F_VLAN_CHALLENGED */ "vlan-challenged",
+	/* NETIF_F_GSO */             "tx-generic-segmentation",
+	/* NETIF_F_LLTX */            "tx-lockless",
+	/* NETIF_F_NETNS_LOCAL */     "netns-local",
+	/* NETIF_F_GRO */             "rx-gro",
+	/* NETIF_F_LRO */             "rx-lro",
+
+	/* NETIF_F_TSO */             "tx-tcp-segmentation",
+	/* NETIF_F_UFO */             "tx-udp-fragmentation",
+	/* NETIF_F_GSO_ROBUST */      "tx-gso-robust",
+	/* NETIF_F_TSO_ECN */         "tx-tcp-ecn-segmentation",
+	/* NETIF_F_TSO6 */            "tx-tcp6-segmentation",
+	/* NETIF_F_FSO */             "tx-fcoe-segmentation",
+	"",
+	"",
+
+	/* NETIF_F_FCOE_CRC */        "tx-checksum-fcoe-crc",
+	/* NETIF_F_SCTP_CSUM */       "tx-checksum-sctp",
+	/* NETIF_F_FCOE_MTU */        "fcoe-mtu",
+	/* NETIF_F_NTUPLE */          "rx-ntuple-filter",
+	/* NETIF_F_RXHASH */          "rx-hashing",
+	/* NETIF_F_RXCSUM */          "rx-checksum",
+	"",
+	"",
+};
+
+static int __ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+	const struct ethtool_ops *ops = dev->ethtool_ops;
+
+	if (sset == ETH_SS_FEATURES)
+		return ARRAY_SIZE(netdev_features_strings);
+
+	if (ops && ops->get_sset_count && ops->get_strings)
+		return ops->get_sset_count(dev, sset);
+	else
+		return -EOPNOTSUPP;
+}
+
+static void __ethtool_get_strings(struct net_device *dev,
+	u32 stringset, u8 *data)
+{
+	const struct ethtool_ops *ops = dev->ethtool_ops;
+
+	if (stringset == ETH_SS_FEATURES)
+		memcpy(data, netdev_features_strings,
+			sizeof(netdev_features_strings));
+	else
+		/* ops->get_strings is valid because checked earlier */
+		ops->get_strings(dev, stringset, data);
+}
+
+static u32 ethtool_get_feature_mask(u32 eth_cmd)
+{
+	/* feature masks of legacy discrete ethtool ops */
+
+	switch (eth_cmd) {
+	case ETHTOOL_GTXCSUM:
+	case ETHTOOL_STXCSUM:
+		return NETIF_F_ALL_CSUM | NETIF_F_SCTP_CSUM;
+	case ETHTOOL_GRXCSUM:
+	case ETHTOOL_SRXCSUM:
+		return NETIF_F_RXCSUM;
+	case ETHTOOL_GSG:
+	case ETHTOOL_SSG:
+		return NETIF_F_SG;
+	case ETHTOOL_GTSO:
+	case ETHTOOL_STSO:
+		return NETIF_F_ALL_TSO;
+	case ETHTOOL_GUFO:
+	case ETHTOOL_SUFO:
+		return NETIF_F_UFO;
+	case ETHTOOL_GGSO:
+	case ETHTOOL_SGSO:
+		return NETIF_F_GSO;
+	case ETHTOOL_GGRO:
+	case ETHTOOL_SGRO:
+		return NETIF_F_GRO;
+	default:
+		BUG();
+	}
+}
+
+static void *__ethtool_get_one_feature_actor(struct net_device *dev, u32 ethcmd)
+{
+	const struct ethtool_ops *ops = dev->ethtool_ops;
+
+	if (!ops)
+		return NULL;
+
+	switch (ethcmd) {
+	case ETHTOOL_GTXCSUM:
+		return ops->get_tx_csum;
+	case ETHTOOL_GRXCSUM:
+		return ops->get_rx_csum;
+	case ETHTOOL_GSG:
+		return ops->get_sg;
+	case ETHTOOL_GTSO:
+		return ops->get_tso;
+	case ETHTOOL_GUFO:
+		return ops->get_ufo;
+	default:
+		return NULL;
+	}
+}
+
+static u32 __ethtool_get_rx_csum_oldbug(struct net_device *dev)
+{
+	return !!(dev->features & NETIF_F_ALL_CSUM);
+}
+
+static int ethtool_get_one_feature(struct net_device *dev,
+	char __user *useraddr, u32 ethcmd)
+{
+	u32 mask = ethtool_get_feature_mask(ethcmd);
+	struct ethtool_value edata = {
+		.cmd = ethcmd,
+		.data = !!(dev->features & mask),
+	};
+
+	/* compatibility with discrete get_ ops */
+	if (!(dev->hw_features & mask)) {
+		u32 (*actor)(struct net_device *);
+
+		actor = __ethtool_get_one_feature_actor(dev, ethcmd);
+
+		/* bug compatibility with old get_rx_csum */
+		if (ethcmd == ETHTOOL_GRXCSUM && !actor)
+			actor = __ethtool_get_rx_csum_oldbug;
+
+		if (actor)
+			edata.data = actor(dev);
+	}
+
+	if (copy_to_user(useraddr, &edata, sizeof(edata)))
+		return -EFAULT;
+	return 0;
+}
+
+static int __ethtool_set_tx_csum(struct net_device *dev, u32 data);
+static int __ethtool_set_rx_csum(struct net_device *dev, u32 data);
+static int __ethtool_set_sg(struct net_device *dev, u32 data);
+static int __ethtool_set_tso(struct net_device *dev, u32 data);
+static int __ethtool_set_ufo(struct net_device *dev, u32 data);
+
+static int ethtool_set_one_feature(struct net_device *dev,
+	void __user *useraddr, u32 ethcmd)
+{
+	struct ethtool_value edata;
+	u32 mask;
+
+	if (copy_from_user(&edata, useraddr, sizeof(edata)))
+		return -EFAULT;
+
+	mask = ethtool_get_feature_mask(ethcmd);
+	mask &= dev->hw_features;
+	if (mask) {
+		if (edata.data)
+			dev->wanted_features |= mask;
+		else
+			dev->wanted_features &= ~mask;
+
+		netdev_update_features(dev);
+		return 0;
+	}
+
+	/* Driver is not converted to ndo_fix_features or does not
+	 * support changing this offload. In the latter case it won't
+	 * have corresponding ethtool_ops field set.
+	 *
+	 * Following part is to be removed after all drivers advertise
+	 * their changeable features in netdev->hw_features and stop
+	 * using discrete offload setting ops.
+	 */
+
+	switch (ethcmd) {
+	case ETHTOOL_STXCSUM:
+		return __ethtool_set_tx_csum(dev, edata.data);
+	case ETHTOOL_SRXCSUM:
+		return __ethtool_set_rx_csum(dev, edata.data);
+	case ETHTOOL_SSG:
+		return __ethtool_set_sg(dev, edata.data);
+	case ETHTOOL_STSO:
+		return __ethtool_set_tso(dev, edata.data);
+	case ETHTOOL_SUFO:
+		return __ethtool_set_ufo(dev, edata.data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int __ethtool_set_flags(struct net_device *dev, u32 data)
+{
+	u32 changed;
+
+	if (data & ~flags_dup_features)
+		return -EINVAL;
+
+	/* legacy set_flags() op */
+	if (dev->ethtool_ops->set_flags) {
+		if (unlikely(dev->hw_features & flags_dup_features))
+			netdev_warn(dev,
+				"driver BUG: mixed hw_features and set_flags()\n");
+		return dev->ethtool_ops->set_flags(dev, data);
+	}
+
+	/* allow changing only bits set in hw_features */
+	changed = (data ^ dev->wanted_features) & flags_dup_features;
+	if (changed & ~dev->hw_features)
+		return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
+
+	dev->wanted_features =
+		(dev->wanted_features & ~changed) | data;
+
+	netdev_update_features(dev);
+
+	return 0;
+}
+
 static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
 {
 	struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
@@ -251,14 +546,10 @@
 						    void __user *useraddr)
 {
 	struct ethtool_sset_info info;
-	const struct ethtool_ops *ops = dev->ethtool_ops;
 	u64 sset_mask;
 	int i, idx = 0, n_bits = 0, ret, rc;
 	u32 *info_buf = NULL;
 
-	if (!ops->get_sset_count)
-		return -EOPNOTSUPP;
-
 	if (copy_from_user(&info, useraddr, sizeof(info)))
 		return -EFAULT;
 
@@ -285,7 +576,7 @@
 		if (!(sset_mask & (1ULL << i)))
 			continue;
 
-		rc = ops->get_sset_count(dev, i);
+		rc = __ethtool_get_sset_count(dev, i);
 		if (rc >= 0) {
 			info.sset_mask |= (1ULL << i);
 			info_buf[idx++] = rc;
@@ -817,7 +1108,7 @@
 	if (regs.len > reglen)
 		regs.len = reglen;
 
-	regbuf = vmalloc(reglen);
+	regbuf = vzalloc(reglen);
 	if (!regbuf)
 		return -ENOMEM;
 
@@ -1091,6 +1382,9 @@
 {
 	int err;
 
+	if (data && !(dev->features & NETIF_F_ALL_CSUM))
+		return -EINVAL;
+
 	if (!data && dev->ethtool_ops->set_tso) {
 		err = dev->ethtool_ops->set_tso(dev, 0);
 		if (err)
@@ -1105,145 +1399,55 @@
 	return dev->ethtool_ops->set_sg(dev, data);
 }
 
-static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_tx_csum(struct net_device *dev, u32 data)
 {
-	struct ethtool_value edata;
 	int err;
 
 	if (!dev->ethtool_ops->set_tx_csum)
 		return -EOPNOTSUPP;
 
-	if (copy_from_user(&edata, useraddr, sizeof(edata)))
-		return -EFAULT;
-
-	if (!edata.data && dev->ethtool_ops->set_sg) {
+	if (!data && dev->ethtool_ops->set_sg) {
 		err = __ethtool_set_sg(dev, 0);
 		if (err)
 			return err;
 	}
 
-	return dev->ethtool_ops->set_tx_csum(dev, edata.data);
+	return dev->ethtool_ops->set_tx_csum(dev, data);
 }
-EXPORT_SYMBOL(ethtool_op_set_tx_csum);
 
-static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_rx_csum(struct net_device *dev, u32 data)
 {
-	struct ethtool_value edata;
-
 	if (!dev->ethtool_ops->set_rx_csum)
 		return -EOPNOTSUPP;
 
-	if (copy_from_user(&edata, useraddr, sizeof(edata)))
-		return -EFAULT;
-
-	if (!edata.data && dev->ethtool_ops->set_sg)
+	if (!data)
 		dev->features &= ~NETIF_F_GRO;
 
-	return dev->ethtool_ops->set_rx_csum(dev, edata.data);
+	return dev->ethtool_ops->set_rx_csum(dev, data);
 }
 
-static int ethtool_set_sg(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_tso(struct net_device *dev, u32 data)
 {
-	struct ethtool_value edata;
-
-	if (!dev->ethtool_ops->set_sg)
-		return -EOPNOTSUPP;
-
-	if (copy_from_user(&edata, useraddr, sizeof(edata)))
-		return -EFAULT;
-
-	if (edata.data &&
-	    !(dev->features & NETIF_F_ALL_CSUM))
-		return -EINVAL;
-
-	return __ethtool_set_sg(dev, edata.data);
-}
-
-static int ethtool_set_tso(struct net_device *dev, char __user *useraddr)
-{
-	struct ethtool_value edata;
-
 	if (!dev->ethtool_ops->set_tso)
 		return -EOPNOTSUPP;
 
-	if (copy_from_user(&edata, useraddr, sizeof(edata)))
-		return -EFAULT;
-
-	if (edata.data && !(dev->features & NETIF_F_SG))
+	if (data && !(dev->features & NETIF_F_SG))
 		return -EINVAL;
 
-	return dev->ethtool_ops->set_tso(dev, edata.data);
+	return dev->ethtool_ops->set_tso(dev, data);
 }
 
-static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr)
+static int __ethtool_set_ufo(struct net_device *dev, u32 data)
 {
-	struct ethtool_value edata;
-
 	if (!dev->ethtool_ops->set_ufo)
 		return -EOPNOTSUPP;
-	if (copy_from_user(&edata, useraddr, sizeof(edata)))
-		return -EFAULT;
-	if (edata.data && !(dev->features & NETIF_F_SG))
+	if (data && !(dev->features & NETIF_F_SG))
 		return -EINVAL;
-	if (edata.data && !((dev->features & NETIF_F_GEN_CSUM) ||
+	if (data && !((dev->features & NETIF_F_GEN_CSUM) ||
 		(dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
 			== (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)))
 		return -EINVAL;
-	return dev->ethtool_ops->set_ufo(dev, edata.data);
-}
-
-static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
-{
-	struct ethtool_value edata = { ETHTOOL_GGSO };
-
-	edata.data = dev->features & NETIF_F_GSO;
-	if (copy_to_user(useraddr, &edata, sizeof(edata)))
-		return -EFAULT;
-	return 0;
-}
-
-static int ethtool_set_gso(struct net_device *dev, char __user *useraddr)
-{
-	struct ethtool_value edata;
-
-	if (copy_from_user(&edata, useraddr, sizeof(edata)))
-		return -EFAULT;
-	if (edata.data)
-		dev->features |= NETIF_F_GSO;
-	else
-		dev->features &= ~NETIF_F_GSO;
-	return 0;
-}
-
-static int ethtool_get_gro(struct net_device *dev, char __user *useraddr)
-{
-	struct ethtool_value edata = { ETHTOOL_GGRO };
-
-	edata.data = dev->features & NETIF_F_GRO;
-	if (copy_to_user(useraddr, &edata, sizeof(edata)))
-		return -EFAULT;
-	return 0;
-}
-
-static int ethtool_set_gro(struct net_device *dev, char __user *useraddr)
-{
-	struct ethtool_value edata;
-
-	if (copy_from_user(&edata, useraddr, sizeof(edata)))
-		return -EFAULT;
-
-	if (edata.data) {
-		u32 rxcsum = dev->ethtool_ops->get_rx_csum ?
-				dev->ethtool_ops->get_rx_csum(dev) :
-				ethtool_op_get_rx_csum(dev);
-
-		if (!rxcsum)
-			return -EINVAL;
-		dev->features |= NETIF_F_GRO;
-	} else
-		dev->features &= ~NETIF_F_GRO;
-
-	return 0;
+	return dev->ethtool_ops->set_ufo(dev, data);
 }
 
 static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
@@ -1287,17 +1491,13 @@
 static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
 {
 	struct ethtool_gstrings gstrings;
-	const struct ethtool_ops *ops = dev->ethtool_ops;
 	u8 *data;
 	int ret;
 
-	if (!ops->get_strings || !ops->get_sset_count)
-		return -EOPNOTSUPP;
-
 	if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
 		return -EFAULT;
 
-	ret = ops->get_sset_count(dev, gstrings.string_set);
+	ret = __ethtool_get_sset_count(dev, gstrings.string_set);
 	if (ret < 0)
 		return ret;
 
@@ -1307,7 +1507,7 @@
 	if (!data)
 		return -ENOMEM;
 
-	ops->get_strings(dev, gstrings.string_set, data);
+	__ethtool_get_strings(dev, gstrings.string_set, data);
 
 	ret = -EFAULT;
 	if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
@@ -1317,7 +1517,7 @@
 		goto out;
 	ret = 0;
 
- out:
+out:
 	kfree(data);
 	return ret;
 }
@@ -1458,7 +1658,7 @@
 	void __user *useraddr = ifr->ifr_data;
 	u32 ethcmd;
 	int rc;
-	unsigned long old_features;
+	u32 old_features;
 
 	if (!dev || !netif_device_present(dev))
 		return -ENODEV;
@@ -1500,6 +1700,7 @@
 	case ETHTOOL_GRXCLSRLCNT:
 	case ETHTOOL_GRXCLSRULE:
 	case ETHTOOL_GRXCLSRLALL:
+	case ETHTOOL_GFEATURES:
 		break;
 	default:
 		if (!capable(CAP_NET_ADMIN))
@@ -1570,42 +1771,6 @@
 	case ETHTOOL_SPAUSEPARAM:
 		rc = ethtool_set_pauseparam(dev, useraddr);
 		break;
-	case ETHTOOL_GRXCSUM:
-		rc = ethtool_get_value(dev, useraddr, ethcmd,
-				       (dev->ethtool_ops->get_rx_csum ?
-					dev->ethtool_ops->get_rx_csum :
-					ethtool_op_get_rx_csum));
-		break;
-	case ETHTOOL_SRXCSUM:
-		rc = ethtool_set_rx_csum(dev, useraddr);
-		break;
-	case ETHTOOL_GTXCSUM:
-		rc = ethtool_get_value(dev, useraddr, ethcmd,
-				       (dev->ethtool_ops->get_tx_csum ?
-					dev->ethtool_ops->get_tx_csum :
-					ethtool_op_get_tx_csum));
-		break;
-	case ETHTOOL_STXCSUM:
-		rc = ethtool_set_tx_csum(dev, useraddr);
-		break;
-	case ETHTOOL_GSG:
-		rc = ethtool_get_value(dev, useraddr, ethcmd,
-				       (dev->ethtool_ops->get_sg ?
-					dev->ethtool_ops->get_sg :
-					ethtool_op_get_sg));
-		break;
-	case ETHTOOL_SSG:
-		rc = ethtool_set_sg(dev, useraddr);
-		break;
-	case ETHTOOL_GTSO:
-		rc = ethtool_get_value(dev, useraddr, ethcmd,
-				       (dev->ethtool_ops->get_tso ?
-					dev->ethtool_ops->get_tso :
-					ethtool_op_get_tso));
-		break;
-	case ETHTOOL_STSO:
-		rc = ethtool_set_tso(dev, useraddr);
-		break;
 	case ETHTOOL_TEST:
 		rc = ethtool_self_test(dev, useraddr);
 		break;
@@ -1621,21 +1786,6 @@
 	case ETHTOOL_GPERMADDR:
 		rc = ethtool_get_perm_addr(dev, useraddr);
 		break;
-	case ETHTOOL_GUFO:
-		rc = ethtool_get_value(dev, useraddr, ethcmd,
-				       (dev->ethtool_ops->get_ufo ?
-					dev->ethtool_ops->get_ufo :
-					ethtool_op_get_ufo));
-		break;
-	case ETHTOOL_SUFO:
-		rc = ethtool_set_ufo(dev, useraddr);
-		break;
-	case ETHTOOL_GGSO:
-		rc = ethtool_get_gso(dev, useraddr);
-		break;
-	case ETHTOOL_SGSO:
-		rc = ethtool_set_gso(dev, useraddr);
-		break;
 	case ETHTOOL_GFLAGS:
 		rc = ethtool_get_value(dev, useraddr, ethcmd,
 				       (dev->ethtool_ops->get_flags ?
@@ -1643,8 +1793,7 @@
 					ethtool_op_get_flags));
 		break;
 	case ETHTOOL_SFLAGS:
-		rc = ethtool_set_value(dev, useraddr,
-				       dev->ethtool_ops->set_flags);
+		rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags);
 		break;
 	case ETHTOOL_GPFLAGS:
 		rc = ethtool_get_value(dev, useraddr, ethcmd,
@@ -1666,12 +1815,6 @@
 	case ETHTOOL_SRXCLSRLINS:
 		rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
 		break;
-	case ETHTOOL_GGRO:
-		rc = ethtool_get_gro(dev, useraddr);
-		break;
-	case ETHTOOL_SGRO:
-		rc = ethtool_set_gro(dev, useraddr);
-		break;
 	case ETHTOOL_FLASHDEV:
 		rc = ethtool_flash_device(dev, useraddr);
 		break;
@@ -1693,6 +1836,30 @@
 	case ETHTOOL_SRXFHINDIR:
 		rc = ethtool_set_rxfh_indir(dev, useraddr);
 		break;
+	case ETHTOOL_GFEATURES:
+		rc = ethtool_get_features(dev, useraddr);
+		break;
+	case ETHTOOL_SFEATURES:
+		rc = ethtool_set_features(dev, useraddr);
+		break;
+	case ETHTOOL_GTXCSUM:
+	case ETHTOOL_GRXCSUM:
+	case ETHTOOL_GSG:
+	case ETHTOOL_GTSO:
+	case ETHTOOL_GUFO:
+	case ETHTOOL_GGSO:
+	case ETHTOOL_GGRO:
+		rc = ethtool_get_one_feature(dev, useraddr, ethcmd);
+		break;
+	case ETHTOOL_STXCSUM:
+	case ETHTOOL_SRXCSUM:
+	case ETHTOOL_SSG:
+	case ETHTOOL_STSO:
+	case ETHTOOL_SUFO:
+	case ETHTOOL_SGSO:
+	case ETHTOOL_SGRO:
+		rc = ethtool_set_one_feature(dev, useraddr, ethcmd);
+		break;
 	default:
 		rc = -EOPNOTSUPP;
 	}
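
The net effect of the ethtool changes is that the per-offload ETHTOOL_G*/S* commands become thin compatibility wrappers (ethtool_get_one_feature()/ethtool_set_one_feature()) around a single feature bitmap exposed through ETHTOOL_GFEATURES/ETHTOOL_SFEATURES. A rough user-space sketch of querying the new interface, assuming the ethtool_gfeatures/ethtool_get_features_block definitions reach <linux/ethtool.h> with the layout used above; "eth0" and the missing error handling are placeholders.

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct {
			struct ethtool_gfeatures hdr;
			struct ethtool_get_features_block blocks[1];
		} gf;
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&gf, 0, sizeof(gf));
		gf.hdr.cmd = ETHTOOL_GFEATURES;
		gf.hdr.size = 1;			/* ETHTOOL_DEV_FEATURE_WORDS */

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&gf;

		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
			perror("ETHTOOL_GFEATURES");
			return 1;
		}
		printf("available 0x%08x requested 0x%08x active 0x%08x\n",
		       gf.blocks[0].available, gf.blocks[0].requested,
		       gf.blocks[0].active);
		return 0;
	}

The bit positions correspond to the netdev_features_strings[] table above, so bit 0 of each word is "tx-scatter-gather", bit 1 "tx-checksum-ipv4", and so on.
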
diff --git a/net/core/filter.c b/net/core/filter.c
index afc5837..232b187 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -142,14 +142,14 @@
 	if (err)
 		return err;
 
-	rcu_read_lock_bh();
-	filter = rcu_dereference_bh(sk->sk_filter);
+	rcu_read_lock();
+	filter = rcu_dereference(sk->sk_filter);
 	if (filter) {
 		unsigned int pkt_len = sk_run_filter(skb, filter->insns);
 
 		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
 	}
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 
 	return err;
 }
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 60a9029..799f06e 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -316,7 +316,7 @@
 {
 	size_t size = entries * sizeof(struct neighbour *);
 	struct neigh_hash_table *ret;
-	struct neighbour **buckets;
+	struct neighbour __rcu **buckets;
 
 	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
 	if (!ret)
@@ -324,14 +324,14 @@
 	if (size <= PAGE_SIZE)
 		buckets = kzalloc(size, GFP_ATOMIC);
 	else
-		buckets = (struct neighbour **)
+		buckets = (struct neighbour __rcu **)
 			  __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
 					   get_order(size));
 	if (!buckets) {
 		kfree(ret);
 		return NULL;
 	}
-	rcu_assign_pointer(ret->hash_buckets, buckets);
+	ret->hash_buckets = buckets;
 	ret->hash_mask = entries - 1;
 	get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
 	return ret;
@@ -343,7 +343,7 @@
 						    struct neigh_hash_table,
 						    rcu);
 	size_t size = (nht->hash_mask + 1) * sizeof(struct neighbour *);
-	struct neighbour **buckets = nht->hash_buckets;
+	struct neighbour __rcu **buckets = nht->hash_buckets;
 
 	if (size <= PAGE_SIZE)
 		kfree(buckets);
@@ -1540,7 +1540,7 @@
 		panic("cannot create neighbour proc dir entry");
 #endif
 
-	tbl->nht = neigh_hash_alloc(8);
+	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(8));
 
 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
 	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
@@ -1602,7 +1602,8 @@
 	}
 	write_unlock(&neigh_tbl_lock);
 
-	call_rcu(&tbl->nht->rcu, neigh_hash_free_rcu);
+	call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
+		 neigh_hash_free_rcu);
 	tbl->nht = NULL;
 
 	kfree(tbl->phash_buckets);
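
The neighbour.c hunks are mostly RCU annotation fixes: the bucket array becomes __rcu, the not-yet-published table is filled with plain stores (and RCU_INIT_POINTER() where a pointer is being initialised), readers keep using rcu_dereference(), and the teardown path uses rcu_dereference_protected() because it runs on the update side rather than under rcu_read_lock(). A condensed sketch of that idiom with invented names, loosely modelled on the code above:

	#include <linux/kernel.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct bucket_table {
		unsigned int size;
		struct rcu_head rcu;
	};

	static struct bucket_table __rcu *tbl;
	static DEFINE_SPINLOCK(tbl_lock);

	static void table_init(void)
	{
		/* Not visible to readers yet: no memory barrier is required. */
		RCU_INIT_POINTER(tbl, kzalloc(sizeof(struct bucket_table), GFP_KERNEL));
	}

	static unsigned int table_read_size(void)
	{
		struct bucket_table *t;
		unsigned int size;

		rcu_read_lock();
		t = rcu_dereference(tbl);		/* reader side */
		size = t ? t->size : 0;
		rcu_read_unlock();
		return size;
	}

	static void table_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct bucket_table, rcu));
	}

	static void table_replace(struct bucket_table *new)
	{
		struct bucket_table *old;

		spin_lock(&tbl_lock);
		old = rcu_dereference_protected(tbl, lockdep_is_held(&tbl_lock));
		rcu_assign_pointer(tbl, new);		/* publish with a write barrier */
		spin_unlock(&tbl_lock);
		if (old)
			call_rcu(&old->rcu, table_free_rcu);
	}
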
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index e23c01b..5ceb257 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -99,7 +99,7 @@
 NETDEVICE_SHOW(addr_len, fmt_dec);
 NETDEVICE_SHOW(iflink, fmt_dec);
 NETDEVICE_SHOW(ifindex, fmt_dec);
-NETDEVICE_SHOW(features, fmt_long_hex);
+NETDEVICE_SHOW(features, fmt_hex);
 NETDEVICE_SHOW(type, fmt_dec);
 NETDEVICE_SHOW(link_mode, fmt_dec);
 
@@ -295,6 +295,20 @@
 	return ret;
 }
 
+NETDEVICE_SHOW(group, fmt_dec);
+
+static int change_group(struct net_device *net, unsigned long new_group)
+{
+	dev_set_group(net, (int) new_group);
+	return 0;
+}
+
+static ssize_t store_group(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t len)
+{
+	return netdev_store(dev, attr, buf, len, change_group);
+}
+
 static struct device_attribute net_class_attributes[] = {
 	__ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
 	__ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
@@ -316,6 +330,7 @@
 	__ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
 	__ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
 	       store_tx_queue_len),
+	__ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
 	{}
 };
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a9e7fc4..d73b77a 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -251,6 +251,7 @@
 	int max_pkt_size;	/* = ETH_ZLEN; */
 	int pkt_overhead;	/* overhead for MPLS, VLANs, IPSEC etc */
 	int nfrags;
+	struct page *page;
 	u64 delay;		/* nano-seconds */
 
 	__u64 count;		/* Default No packets to send */
@@ -1134,6 +1135,10 @@
 		if (node_possible(value)) {
 			pkt_dev->node = value;
 			sprintf(pg_result, "OK: node=%d", pkt_dev->node);
+			if (pkt_dev->page) {
+				put_page(pkt_dev->page);
+				pkt_dev->page = NULL;
+			}
 		}
 		else
 			sprintf(pg_result, "ERROR: node not possible");
@@ -2605,6 +2610,90 @@
 	return htons(id | (cfi << 12) | (prio << 13));
 }
 
+static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
+				int datalen)
+{
+	struct timeval timestamp;
+	struct pktgen_hdr *pgh;
+
+	pgh = (struct pktgen_hdr *)skb_put(skb, sizeof(*pgh));
+	datalen -= sizeof(*pgh);
+
+	if (pkt_dev->nfrags <= 0) {
+		pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
+		memset(pgh + 1, 0, datalen);
+	} else {
+		int frags = pkt_dev->nfrags;
+		int i, len;
+
+
+		if (frags > MAX_SKB_FRAGS)
+			frags = MAX_SKB_FRAGS;
+		len = datalen - frags * PAGE_SIZE;
+		if (len > 0) {
+			memset(skb_put(skb, len), 0, len);
+			datalen = frags * PAGE_SIZE;
+		}
+
+		i = 0;
+		while (datalen > 0) {
+			if (unlikely(!pkt_dev->page)) {
+				int node = numa_node_id();
+
+				if (pkt_dev->node >= 0 && (pkt_dev->flags & F_NODE))
+					node = pkt_dev->node;
+				pkt_dev->page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+				if (!pkt_dev->page)
+					break;
+			}
+			skb_shinfo(skb)->frags[i].page = pkt_dev->page;
+			get_page(pkt_dev->page);
+			skb_shinfo(skb)->frags[i].page_offset = 0;
+			skb_shinfo(skb)->frags[i].size =
+			    (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
+			datalen -= skb_shinfo(skb)->frags[i].size;
+			skb->len += skb_shinfo(skb)->frags[i].size;
+			skb->data_len += skb_shinfo(skb)->frags[i].size;
+			i++;
+			skb_shinfo(skb)->nr_frags = i;
+		}
+
+		while (i < frags) {
+			int rem;
+
+			if (i == 0)
+				break;
+
+			rem = skb_shinfo(skb)->frags[i - 1].size / 2;
+			if (rem == 0)
+				break;
+
+			skb_shinfo(skb)->frags[i - 1].size -= rem;
+
+			skb_shinfo(skb)->frags[i] =
+			    skb_shinfo(skb)->frags[i - 1];
+			get_page(skb_shinfo(skb)->frags[i].page);
+			skb_shinfo(skb)->frags[i].page =
+			    skb_shinfo(skb)->frags[i - 1].page;
+			skb_shinfo(skb)->frags[i].page_offset +=
+			    skb_shinfo(skb)->frags[i - 1].size;
+			skb_shinfo(skb)->frags[i].size = rem;
+			i++;
+			skb_shinfo(skb)->nr_frags = i;
+		}
+	}
+
+	/* Stamp the time, and sequence number,
+	 * convert them to network byte order
+	 */
+	pgh->pgh_magic = htonl(PKTGEN_MAGIC);
+	pgh->seq_num = htonl(pkt_dev->seq_num);
+
+	do_gettimeofday(&timestamp);
+	pgh->tv_sec = htonl(timestamp.tv_sec);
+	pgh->tv_usec = htonl(timestamp.tv_usec);
+}
+
 static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
 					struct pktgen_dev *pkt_dev)
 {
@@ -2613,7 +2702,6 @@
 	struct udphdr *udph;
 	int datalen, iplen;
 	struct iphdr *iph;
-	struct pktgen_hdr *pgh = NULL;
 	__be16 protocol = htons(ETH_P_IP);
 	__be32 *mpls;
 	__be16 *vlan_tci = NULL;                 /* Encapsulates priority and VLAN ID */
@@ -2729,76 +2817,7 @@
 			   pkt_dev->pkt_overhead);
 	skb->dev = odev;
 	skb->pkt_type = PACKET_HOST;
-
-	if (pkt_dev->nfrags <= 0) {
-		pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
-		memset(pgh + 1, 0, datalen - sizeof(struct pktgen_hdr));
-	} else {
-		int frags = pkt_dev->nfrags;
-		int i, len;
-
-		pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
-
-		if (frags > MAX_SKB_FRAGS)
-			frags = MAX_SKB_FRAGS;
-		if (datalen > frags * PAGE_SIZE) {
-			len = datalen - frags * PAGE_SIZE;
-			memset(skb_put(skb, len), 0, len);
-			datalen = frags * PAGE_SIZE;
-		}
-
-		i = 0;
-		while (datalen > 0) {
-			struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
-			skb_shinfo(skb)->frags[i].page = page;
-			skb_shinfo(skb)->frags[i].page_offset = 0;
-			skb_shinfo(skb)->frags[i].size =
-			    (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
-			datalen -= skb_shinfo(skb)->frags[i].size;
-			skb->len += skb_shinfo(skb)->frags[i].size;
-			skb->data_len += skb_shinfo(skb)->frags[i].size;
-			i++;
-			skb_shinfo(skb)->nr_frags = i;
-		}
-
-		while (i < frags) {
-			int rem;
-
-			if (i == 0)
-				break;
-
-			rem = skb_shinfo(skb)->frags[i - 1].size / 2;
-			if (rem == 0)
-				break;
-
-			skb_shinfo(skb)->frags[i - 1].size -= rem;
-
-			skb_shinfo(skb)->frags[i] =
-			    skb_shinfo(skb)->frags[i - 1];
-			get_page(skb_shinfo(skb)->frags[i].page);
-			skb_shinfo(skb)->frags[i].page =
-			    skb_shinfo(skb)->frags[i - 1].page;
-			skb_shinfo(skb)->frags[i].page_offset +=
-			    skb_shinfo(skb)->frags[i - 1].size;
-			skb_shinfo(skb)->frags[i].size = rem;
-			i++;
-			skb_shinfo(skb)->nr_frags = i;
-		}
-	}
-
-	/* Stamp the time, and sequence number,
-	 * convert them to network byte order
-	 */
-	if (pgh) {
-		struct timeval timestamp;
-
-		pgh->pgh_magic = htonl(PKTGEN_MAGIC);
-		pgh->seq_num = htonl(pkt_dev->seq_num);
-
-		do_gettimeofday(&timestamp);
-		pgh->tv_sec = htonl(timestamp.tv_sec);
-		pgh->tv_usec = htonl(timestamp.tv_usec);
-	}
+	pktgen_finalize_skb(pkt_dev, skb, datalen);
 
 #ifdef CONFIG_XFRM
 	if (!process_ipsec(pkt_dev, skb, protocol))
@@ -2980,7 +2999,6 @@
 	struct udphdr *udph;
 	int datalen;
 	struct ipv6hdr *iph;
-	struct pktgen_hdr *pgh = NULL;
 	__be16 protocol = htons(ETH_P_IPV6);
 	__be32 *mpls;
 	__be16 *vlan_tci = NULL;                 /* Encapsulates priority and VLAN ID */
@@ -3083,75 +3101,7 @@
 	skb->dev = odev;
 	skb->pkt_type = PACKET_HOST;
 
-	if (pkt_dev->nfrags <= 0)
-		pgh = (struct pktgen_hdr *)skb_put(skb, datalen);
-	else {
-		int frags = pkt_dev->nfrags;
-		int i;
-
-		pgh = (struct pktgen_hdr *)(((char *)(udph)) + 8);
-
-		if (frags > MAX_SKB_FRAGS)
-			frags = MAX_SKB_FRAGS;
-		if (datalen > frags * PAGE_SIZE) {
-			skb_put(skb, datalen - frags * PAGE_SIZE);
-			datalen = frags * PAGE_SIZE;
-		}
-
-		i = 0;
-		while (datalen > 0) {
-			struct page *page = alloc_pages(GFP_KERNEL, 0);
-			skb_shinfo(skb)->frags[i].page = page;
-			skb_shinfo(skb)->frags[i].page_offset = 0;
-			skb_shinfo(skb)->frags[i].size =
-			    (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
-			datalen -= skb_shinfo(skb)->frags[i].size;
-			skb->len += skb_shinfo(skb)->frags[i].size;
-			skb->data_len += skb_shinfo(skb)->frags[i].size;
-			i++;
-			skb_shinfo(skb)->nr_frags = i;
-		}
-
-		while (i < frags) {
-			int rem;
-
-			if (i == 0)
-				break;
-
-			rem = skb_shinfo(skb)->frags[i - 1].size / 2;
-			if (rem == 0)
-				break;
-
-			skb_shinfo(skb)->frags[i - 1].size -= rem;
-
-			skb_shinfo(skb)->frags[i] =
-			    skb_shinfo(skb)->frags[i - 1];
-			get_page(skb_shinfo(skb)->frags[i].page);
-			skb_shinfo(skb)->frags[i].page =
-			    skb_shinfo(skb)->frags[i - 1].page;
-			skb_shinfo(skb)->frags[i].page_offset +=
-			    skb_shinfo(skb)->frags[i - 1].size;
-			skb_shinfo(skb)->frags[i].size = rem;
-			i++;
-			skb_shinfo(skb)->nr_frags = i;
-		}
-	}
-
-	/* Stamp the time, and sequence number,
-	 * convert them to network byte order
-	 * should we update cloned packets too ?
-	 */
-	if (pgh) {
-		struct timeval timestamp;
-
-		pgh->pgh_magic = htonl(PKTGEN_MAGIC);
-		pgh->seq_num = htonl(pkt_dev->seq_num);
-
-		do_gettimeofday(&timestamp);
-		pgh->tv_sec = htonl(timestamp.tv_sec);
-		pgh->tv_usec = htonl(timestamp.tv_usec);
-	}
-	/* pkt_dev->seq_num++; FF: you really mean this? */
+	pktgen_finalize_skb(pkt_dev, skb, datalen);
 
 	return skb;
 }
@@ -3884,6 +3834,8 @@
 	free_SAs(pkt_dev);
 #endif
 	vfree(pkt_dev->flows);
+	if (pkt_dev->page)
+		put_page(pkt_dev->page);
 	kfree(pkt_dev);
 	return 0;
 }
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a5f7535..49f7ea5 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -868,6 +868,7 @@
 		   netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
 	NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode);
 	NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
+	NLA_PUT_U32(skb, IFLA_GROUP, dev->group);
 
 	if (dev->ifindex != dev->iflink)
 		NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
@@ -1035,6 +1036,7 @@
 	[IFLA_MAP]		= { .len = sizeof(struct rtnl_link_ifmap) },
 	[IFLA_MTU]		= { .type = NLA_U32 },
 	[IFLA_LINK]		= { .type = NLA_U32 },
+	[IFLA_MASTER]		= { .type = NLA_U32 },
 	[IFLA_TXQLEN]		= { .type = NLA_U32 },
 	[IFLA_WEIGHT]		= { .type = NLA_U32 },
 	[IFLA_OPERSTATE]	= { .type = NLA_U8 },
@@ -1121,8 +1123,7 @@
 				return -EOPNOTSUPP;
 
 			if (af_ops->validate_link_af) {
-				err = af_ops->validate_link_af(dev,
-							tb[IFLA_AF_SPEC]);
+				err = af_ops->validate_link_af(dev, af);
 				if (err < 0)
 					return err;
 			}
@@ -1178,6 +1179,41 @@
 	return err;
 }
 
+static int do_set_master(struct net_device *dev, int ifindex)
+{
+	struct net_device *master_dev;
+	const struct net_device_ops *ops;
+	int err;
+
+	if (dev->master) {
+		if (dev->master->ifindex == ifindex)
+			return 0;
+		ops = dev->master->netdev_ops;
+		if (ops->ndo_del_slave) {
+			err = ops->ndo_del_slave(dev->master, dev);
+			if (err)
+				return err;
+		} else {
+			return -EOPNOTSUPP;
+		}
+	}
+
+	if (ifindex) {
+		master_dev = __dev_get_by_index(dev_net(dev), ifindex);
+		if (!master_dev)
+			return -EINVAL;
+		ops = master_dev->netdev_ops;
+		if (ops->ndo_add_slave) {
+			err = ops->ndo_add_slave(master_dev, dev);
+			if (err)
+				return err;
+		} else {
+			return -EOPNOTSUPP;
+		}
+	}
+	return 0;
+}
+
 static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
 		      struct nlattr **tb, char *ifname, int modified)
 {
@@ -1265,6 +1301,11 @@
 		modified = 1;
 	}
 
+	if (tb[IFLA_GROUP]) {
+		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
+		modified = 1;
+	}
+
 	/*
 	 * Interface selected by interface index but interface
 	 * name provided implies that a name change has been
@@ -1296,6 +1337,13 @@
 			goto errout;
 	}
 
+	if (tb[IFLA_MASTER]) {
+		err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
+		if (err)
+			goto errout;
+		modified = 1;
+	}
+
 	if (tb[IFLA_TXQLEN])
 		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
 
@@ -1542,6 +1590,8 @@
 		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
 	if (tb[IFLA_LINKMODE])
 		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
+	if (tb[IFLA_GROUP])
+		dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
 
 	return dev;
 
@@ -1552,6 +1602,24 @@
 }
 EXPORT_SYMBOL(rtnl_create_link);
 
+static int rtnl_group_changelink(struct net *net, int group,
+		struct ifinfomsg *ifm,
+		struct nlattr **tb)
+{
+	struct net_device *dev;
+	int err;
+
+	for_each_netdev(net, dev) {
+		if (dev->group == group) {
+			err = do_setlink(dev, ifm, tb, NULL, 0);
+			if (err < 0)
+				return err;
+		}
+	}
+
+	return 0;
+}
+
 static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 {
 	struct net *net = sock_net(skb->sk);
@@ -1579,10 +1647,12 @@
 	ifm = nlmsg_data(nlh);
 	if (ifm->ifi_index > 0)
 		dev = __dev_get_by_index(net, ifm->ifi_index);
-	else if (ifname[0])
-		dev = __dev_get_by_name(net, ifname);
-	else
-		dev = NULL;
+	else {
+		if (ifname[0])
+			dev = __dev_get_by_name(net, ifname);
+		else
+			dev = NULL;
+	}
 
 	err = validate_linkmsg(dev, tb);
 	if (err < 0)
@@ -1646,8 +1716,13 @@
 			return do_setlink(dev, ifm, tb, ifname, modified);
 		}
 
-		if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+		if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
+			if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
+				return rtnl_group_changelink(net,
+						nla_get_u32(tb[IFLA_GROUP]),
+						ifm, tb);
 			return -ENODEV;
+		}
 
 		if (ifm->ifi_index)
 			return -EOPNOTSUPP;
@@ -1672,6 +1747,9 @@
 			snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
 
 		dest_net = rtnl_link_get_net(net, tb);
+		if (IS_ERR(dest_net))
+			return PTR_ERR(dest_net);
+
 		dev = rtnl_create_link(net, dest_net, ifname, ops, tb);
 
 		if (IS_ERR(dev))
@@ -1820,7 +1898,7 @@
 	if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if (kind == 2 && (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
 		struct sock *rtnl;
 		rtnl_dumpit_func dumpit;
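
The rtnetlink additions introduce a per-device group (dev->group, netlink attribute IFLA_GROUP) plus do_set_master()/IFLA_MASTER for enslaving, and rtnl_group_changelink() lets a single RTM_NEWLINK request, sent without an ifindex and without NLM_F_CREATE, apply its attributes to every device in a group. A hand-rolled sketch of such a request, assuming IFLA_GROUP is already exported by the userspace headers; group 5, the 1400-byte MTU and the skipped ACK handling are arbitrary choices for the demo.

	#include <string.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <linux/rtnetlink.h>

	static void add_u32_attr(struct nlmsghdr *nlh, unsigned short type, __u32 value)
	{
		struct rtattr *rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));

		rta->rta_type = type;
		rta->rta_len = RTA_LENGTH(sizeof(value));
		memcpy(RTA_DATA(rta), &value, sizeof(value));
		nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
	}

	int main(void)
	{
		struct {
			struct nlmsghdr nlh;
			struct ifinfomsg ifm;
			char attrs[64];
		} req;
		struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

		memset(&req, 0, sizeof(req));
		req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
		req.nlh.nlmsg_type = RTM_NEWLINK;
		req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;	/* note: no NLM_F_CREATE */
		req.ifm.ifi_family = AF_UNSPEC;
		req.ifm.ifi_index = 0;			/* select devices by group, not ifindex */

		add_u32_attr(&req.nlh, IFLA_GROUP, 5);	/* which group to operate on */
		add_u32_attr(&req.nlh, IFLA_MTU, 1400);	/* applied to every member */

		if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
			   (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
			perror("RTM_NEWLINK");
		close(fd);
		return 0;
	}
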
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d31bb36..14cf560 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -210,6 +210,7 @@
 	shinfo = skb_shinfo(skb);
 	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
 	atomic_set(&shinfo->dataref, 1);
+	kmemcheck_annotate_variable(shinfo->destructor_arg);
 
 	if (fclone) {
 		struct sk_buff *child = skb + 1;
@@ -2497,7 +2498,7 @@
  *	a pointer to the first in a list of new skbs for the segments.
  *	In case of error it returns ERR_PTR(err).
  */
-struct sk_buff *skb_segment(struct sk_buff *skb, int features)
+struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
 {
 	struct sk_buff *segs = NULL;
 	struct sk_buff *tail = NULL;
@@ -2507,7 +2508,7 @@
 	unsigned int offset = doffset;
 	unsigned int headroom;
 	unsigned int len;
-	int sg = features & NETIF_F_SG;
+	int sg = !!(features & NETIF_F_SG);
 	int nfrags = skb_shinfo(skb)->nr_frags;
 	int err = -ENOMEM;
 	int i = 0;
@@ -2744,8 +2745,12 @@
 
 merge:
 	if (offset > headlen) {
-		skbinfo->frags[0].page_offset += offset - headlen;
-		skbinfo->frags[0].size -= offset - headlen;
+		unsigned int eat = offset - headlen;
+
+		skbinfo->frags[0].page_offset += eat;
+		skbinfo->frags[0].size -= eat;
+		skb->data_len -= eat;
+		skb->len -= eat;
 		offset = headlen;
 	}
 
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index d900ab9..d5074a5 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -583,7 +583,7 @@
 	u8 up, idtype;
 	int ret = -EINVAL;
 
-	if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->getapp)
+	if (!tb[DCB_ATTR_APP])
 		goto out;
 
 	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
@@ -604,7 +604,16 @@
 		goto out;
 
 	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
-	up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+
+	if (netdev->dcbnl_ops->getapp) {
+		up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+	} else {
+		struct dcb_app app = {
+					.selector = idtype,
+					.protocol = id,
+				     };
+		up = dcb_getapp(netdev, &app);
+	}
 
 	/* send this back */
 	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
@@ -617,6 +626,9 @@
 	dcb->cmd = DCB_CMD_GAPP;
 
 	app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
+	if (!app_nest)
+		goto out_cancel;
+
 	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
 	if (ret)
 		goto out_cancel;
@@ -1604,6 +1616,10 @@
 u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
 {
 	struct dcb_app_type *itr;
+	struct dcb_app_type event;
+
+	memcpy(&event.name, dev->name, sizeof(event.name));
+	memcpy(&event.app, new, sizeof(event.app));
 
 	spin_lock(&dcb_lock);
 	/* Search for existing match and replace */
@@ -1635,7 +1651,7 @@
 	}
 out:
 	spin_unlock(&dcb_lock);
-	call_dcbevent_notifiers(DCB_APP_EVENT, new);
+	call_dcbevent_notifiers(DCB_APP_EVENT, &event);
 	return 0;
 }
 EXPORT_SYMBOL(dcb_setapp);
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index e96d5e8..fadecd2 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -583,6 +583,15 @@
 	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
 }
 
+/*
+ * Convert RFC 3390 larger initial window into an equivalent number of packets.
+ * This is based on the numbers specified in RFC 5681, 3.1.
+ */
+static inline u32 rfc3390_bytes_to_packets(const u32 smss)
+{
+	return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
+}
+
 static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
 {
 	struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
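
The new rfc3390_bytes_to_packets() helper folds the RFC 3390 initial window, IW = min(4*SMSS, max(2*SMSS, 4380 bytes)), into the segment counts given in RFC 5681, 3.1: 4 segments while 4*SMSS stays within the 4380-byte cap (SMSS <= 1095), 3 segments for 1096..2190, and 2 segments above that. A stand-alone restatement checking the boundary values (not kernel code):

	#include <assert.h>
	#include <stdint.h>

	static uint32_t rfc3390_bytes_to_packets(uint32_t smss)
	{
		return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
	}

	int main(void)
	{
		assert(rfc3390_bytes_to_packets(536)  == 4);	/* 4 * 536  = 2144 <= 4380 */
		assert(rfc3390_bytes_to_packets(1095) == 4);	/* 4 * 1095 = 4380, at the cap */
		assert(rfc3390_bytes_to_packets(1460) == 3);	/* capped at 4380 -> 3 segments */
		assert(rfc3390_bytes_to_packets(2190) == 3);	/* 2 * 2190 = 4380 */
		assert(rfc3390_bytes_to_packets(4096) == 2);	/* 2 * 4096 > 4380 */
		return 0;
	}
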
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 5e63636..06c054d 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -112,6 +112,7 @@
 static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
 static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static void dn_dst_destroy(struct dst_entry *);
 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
 static void dn_dst_link_failure(struct sk_buff *);
 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
@@ -133,11 +134,18 @@
 	.check =		dn_dst_check,
 	.default_advmss =	dn_dst_default_advmss,
 	.default_mtu =		dn_dst_default_mtu,
+	.cow_metrics =		dst_cow_metrics_generic,
+	.destroy =		dn_dst_destroy,
 	.negative_advice =	dn_dst_negative_advice,
 	.link_failure =		dn_dst_link_failure,
 	.update_pmtu =		dn_dst_update_pmtu,
 };
 
+static void dn_dst_destroy(struct dst_entry *dst)
+{
+	dst_destroy_metrics_generic(dst);
+}
+
 static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
 {
 	__u16 tmp = (__u16 __force)(src ^ dst);
@@ -814,14 +822,14 @@
 {
 	struct dn_fib_info *fi = res->fi;
 	struct net_device *dev = rt->dst.dev;
+	unsigned int mss_metric;
 	struct neighbour *n;
-	unsigned int metric;
 
 	if (fi) {
 		if (DN_FIB_RES_GW(*res) &&
 		    DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
 			rt->rt_gateway = DN_FIB_RES_GW(*res);
-		dst_import_metrics(&rt->dst, fi->fib_metrics);
+		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
 	}
 	rt->rt_type = res->type;
 
@@ -834,10 +842,10 @@
 
 	if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
 		dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
-	metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
-	if (metric) {
+	mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
+	if (mss_metric) {
 		unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
-		if (metric > mss)
+		if (mss_metric > mss)
 			dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
 	}
 	return 0;
@@ -1114,7 +1122,7 @@
 	if (dev_out->flags & IFF_LOOPBACK)
 		flags |= RTCF_LOCAL;
 
-	rt = dst_alloc(&dn_dst_ops);
+	rt = dst_alloc(&dn_dst_ops, 0);
 	if (rt == NULL)
 		goto e_nobufs;
 
@@ -1375,7 +1383,7 @@
 	}
 
 make_route:
-	rt = dst_alloc(&dn_dst_ops);
+	rt = dst_alloc(&dn_dst_ops, 0);
 	if (rt == NULL)
 		goto e_nobufs;
 
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index f2abd37..b66600b 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -59,7 +59,6 @@
 };
 
 #define dz_key_0(key)		((key).datum = 0)
-#define dz_prefix(key,dz)	((key).datum)
 
 #define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\
 	for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 0c877a7..3fb14b7 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -428,7 +428,7 @@
 }
 module_exit(dsa_cleanup_module);
 
-MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>")
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
 MODULE_DESCRIPTION("Driver for Distributed Switch Architecture switch chips");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:dsa");
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 15dcc1a..0c28263 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -265,13 +265,13 @@
 static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 			  struct msghdr *msg, size_t len)
 {
-	struct sock *sk = sock->sk;
 	struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
 	struct net_device *dev;
 	struct ec_addr addr;
 	int err;
 	unsigned char port, cb;
 #if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
+	struct sock *sk = sock->sk;
 	struct sk_buff *skb;
 	struct ec_cb *eb;
 #endif
@@ -488,10 +488,10 @@
 
 error_free_buf:
 	vfree(userbuf);
+error:
 #else
 	err = -EPROTOTYPE;
 #endif
-	error:
 	mutex_unlock(&econet_mutex);
 
 	return err;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index a5a1050..cbb505b 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -55,45 +55,9 @@
 
 	  If unsure, say N here.
 
-choice
-	prompt "Choose IP: FIB lookup algorithm (choose FIB_HASH if unsure)"
-	depends on IP_ADVANCED_ROUTER
-	default ASK_IP_FIB_HASH
-
-config ASK_IP_FIB_HASH
-	bool "FIB_HASH"
-	---help---
-	  Current FIB is very proven and good enough for most users.
-
-config IP_FIB_TRIE
-	bool "FIB_TRIE"
-	---help---
-	  Use new experimental LC-trie as FIB lookup algorithm.
-	  This improves lookup performance if you have a large
-	  number of routes.
-
-	  LC-trie is a longest matching prefix lookup algorithm which
-	  performs better than FIB_HASH for large routing tables.
-	  But, it consumes more memory and is more complex.
-
-	  LC-trie is described in:
-
-	  IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
-	  IEEE Journal on Selected Areas in Communications, 17(6):1083-1092,
-	  June 1999
-
-	  An experimental study of compression methods for dynamic tries
-	  Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
-	  <http://www.csc.kth.se/~snilsson/software/dyntrie2/>
-
-endchoice
-
-config IP_FIB_HASH
-	def_bool ASK_IP_FIB_HASH || !IP_ADVANCED_ROUTER
-
 config IP_FIB_TRIE_STATS
 	bool "FIB TRIE statistics"
-	depends on IP_FIB_TRIE
+	depends on IP_ADVANCED_ROUTER
 	---help---
 	  Keep track of statistics on structure of FIB TRIE table.
 	  Useful for testing and measuring TRIE performance.
@@ -140,6 +104,9 @@
 	  handled by the klogd daemon which is responsible for kernel messages
 	  ("man klogd").
 
+config IP_ROUTE_CLASSID
+	bool
+
 config IP_PNP
 	bool "IP: kernel level autoconfiguration"
 	help
@@ -657,4 +624,3 @@
 	  on the Internet.
 
 	  If unsure, say N.
-
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 4978d22..0dc772d 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -10,12 +10,10 @@
 	     tcp_minisocks.o tcp_cong.o \
 	     datagram.o raw.o udp.o udplite.o \
 	     arp.o icmp.o devinet.o af_inet.o  igmp.o \
-	     fib_frontend.o fib_semantics.o \
+	     fib_frontend.o fib_semantics.o fib_trie.o \
 	     inet_fragment.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
-obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o
-obj-$(CONFIG_IP_FIB_TRIE) += fib_trie.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
 obj-$(CONFIG_IP_MROUTE) += ipmr.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f2b6110..7ceb804 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -880,6 +880,19 @@
 }
 EXPORT_SYMBOL(inet_ioctl);
 
+#ifdef CONFIG_COMPAT
+int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+	struct sock *sk = sock->sk;
+	int err = -ENOIOCTLCMD;
+
+	if (sk->sk_prot->compat_ioctl)
+		err = sk->sk_prot->compat_ioctl(sk, cmd, arg);
+
+	return err;
+}
+#endif
+
 const struct proto_ops inet_stream_ops = {
 	.family		   = PF_INET,
 	.owner		   = THIS_MODULE,
@@ -903,6 +916,7 @@
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_sock_common_setsockopt,
 	.compat_getsockopt = compat_sock_common_getsockopt,
+	.compat_ioctl	   = inet_compat_ioctl,
 #endif
 };
 EXPORT_SYMBOL(inet_stream_ops);
@@ -929,6 +943,7 @@
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_sock_common_setsockopt,
 	.compat_getsockopt = compat_sock_common_getsockopt,
+	.compat_ioctl	   = inet_compat_ioctl,
 #endif
 };
 EXPORT_SYMBOL(inet_dgram_ops);
@@ -959,6 +974,7 @@
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_sock_common_setsockopt,
 	.compat_getsockopt = compat_sock_common_getsockopt,
+	.compat_ioctl	   = inet_compat_ioctl,
 #endif
 };
 
@@ -1215,7 +1231,7 @@
 	return err;
 }
 
-static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
+static struct sk_buff *inet_gso_segment(struct sk_buff *skb, u32 features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	struct iphdr *iph;
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 86961be..325053d 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -201,7 +201,10 @@
 	top_iph->ttl = 0;
 	top_iph->check = 0;
 
-	ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
+	if (x->props.flags & XFRM_STATE_ALIGN4)
+		ah->hdrlen  = (XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
+	else
+		ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
 
 	ah->reserved = 0;
 	ah->spi = x->id.spi;
@@ -299,9 +302,15 @@
 	nexthdr = ah->nexthdr;
 	ah_hlen = (ah->hdrlen + 2) << 2;
 
-	if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
-	    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
-		goto out;
+	if (x->props.flags & XFRM_STATE_ALIGN4) {
+		if (ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_full_len) &&
+		    ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len))
+			goto out;
+	} else {
+		if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
+		    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
+			goto out;
+	}
 
 	if (!pskb_may_pull(skb, ah_hlen))
 		goto out;
@@ -450,8 +459,12 @@
 
 	BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
 
-	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
-					  ahp->icv_trunc_len);
+	if (x->props.flags & XFRM_STATE_ALIGN4)
+		x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
+						  ahp->icv_trunc_len);
+	else
+		x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
+						  ahp->icv_trunc_len);
 	if (x->props.mode == XFRM_MODE_TUNNEL)
 		x->props.header_len += sizeof(struct iphdr);
 	x->data = ahp;
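
The ah4.c change makes the AH padding selectable: the traditional 8-byte alignment, or 4-byte alignment when the state carries XFRM_STATE_ALIGN4 (IPv4 AH only requires 32-bit alignment, so larger truncated ICVs such as HMAC-SHA-256-128 need not be padded to the 8-byte rule). As a worked restatement of the arithmetic: the 12-byte struct ip_auth_hdr plus a 16-byte truncated ICV is 28 bytes, which 4-byte alignment keeps at 28 (ah->hdrlen = 28/4 - 2 = 5), while 8-byte alignment pads to 32 (hdrlen = 6). The macros below are re-derived only for illustration and are assumed to mirror the kernel's ALIGN() behaviour.

	#include <stdio.h>

	#define XFRM_ALIGN4(len)	(((len) + 3) & ~3u)
	#define XFRM_ALIGN8(len)	(((len) + 7) & ~7u)

	int main(void)
	{
		unsigned int ah_hdr    = 12;	/* sizeof(struct ip_auth_hdr) */
		unsigned int icv_trunc = 16;	/* e.g. HMAC-SHA-256-128 */

		unsigned int len4 = XFRM_ALIGN4(ah_hdr + icv_trunc);
		unsigned int len8 = XFRM_ALIGN8(ah_hdr + icv_trunc);

		printf("ALIGN4: %u bytes on the wire, ah->hdrlen = %u\n", len4, (len4 >> 2) - 2);
		printf("ALIGN8: %u bytes on the wire, ah->hdrlen = %u\n", len8, (len8 >> 2) - 2);
		return 0;
	}
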
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 04c8b69..7927589 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1017,14 +1017,13 @@
 		IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
 		return 0;
 	}
-	if (__in_dev_get_rcu(dev)) {
-		IN_DEV_CONF_SET(__in_dev_get_rcu(dev), PROXY_ARP, on);
+	if (__in_dev_get_rtnl(dev)) {
+		IN_DEV_CONF_SET(__in_dev_get_rtnl(dev), PROXY_ARP, on);
 		return 0;
 	}
 	return -ENXIO;
 }
 
-/* must be called with rcu_read_lock() */
 static int arp_req_set_public(struct net *net, struct arpreq *r,
 		struct net_device *dev)
 {
@@ -1233,10 +1232,10 @@
 	if (!(r.arp_flags & ATF_NETMASK))
 		((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
 							   htonl(0xFFFFFFFFUL);
-	rcu_read_lock();
+	rtnl_lock();
 	if (r.arp_dev[0]) {
 		err = -ENODEV;
-		dev = dev_get_by_name_rcu(net, r.arp_dev);
+		dev = __dev_get_by_name(net, r.arp_dev);
 		if (dev == NULL)
 			goto out;
 
@@ -1263,7 +1262,7 @@
 		break;
 	}
 out:
-	rcu_read_unlock();
+	rtnl_unlock();
 	if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r)))
 		err = -EFAULT;
 	return err;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 748cb5b..9038928 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -51,6 +51,7 @@
 #include <linux/inetdevice.h>
 #include <linux/igmp.h>
 #include <linux/slab.h>
+#include <linux/hash.h>
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
 #endif
@@ -92,6 +93,71 @@
 	[IFA_LABEL]     	= { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
 };
 
+/* inet_addr_hash's shifting is dependent upon this IN4_ADDR_HSIZE
+ * value.  So if you change this define, make appropriate changes to
+ * inet_addr_hash as well.
+ */
+#define IN4_ADDR_HSIZE	256
+static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
+static DEFINE_SPINLOCK(inet_addr_hash_lock);
+
+static inline unsigned int inet_addr_hash(struct net *net, __be32 addr)
+{
+	u32 val = (__force u32) addr ^ hash_ptr(net, 8);
+
+	return ((val ^ (val >> 8) ^ (val >> 16) ^ (val >> 24)) &
+		(IN4_ADDR_HSIZE - 1));
+}
+
+static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
+{
+	unsigned int hash = inet_addr_hash(net, ifa->ifa_address);
+
+	spin_lock(&inet_addr_hash_lock);
+	hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
+	spin_unlock(&inet_addr_hash_lock);
+}
+
+static void inet_hash_remove(struct in_ifaddr *ifa)
+{
+	spin_lock(&inet_addr_hash_lock);
+	hlist_del_init_rcu(&ifa->hash);
+	spin_unlock(&inet_addr_hash_lock);
+}
+
+/**
+ * __ip_dev_find - find the first device with a given source address.
+ * @net: the net namespace
+ * @addr: the source address
+ * @devref: if true, take a reference on the found device
+ *
+ * If a caller uses devref=false, it should be protected by RCU, or RTNL
+ */
+struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
+{
+	unsigned int hash = inet_addr_hash(net, addr);
+	struct net_device *result = NULL;
+	struct in_ifaddr *ifa;
+	struct hlist_node *node;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(ifa, node, &inet_addr_lst[hash], hash) {
+		struct net_device *dev = ifa->ifa_dev->dev;
+
+		if (!net_eq(dev_net(dev), net))
+			continue;
+		if (ifa->ifa_address == addr) {
+			result = dev;
+			break;
+		}
+	}
+	if (result && devref)
+		dev_hold(result);
+	rcu_read_unlock();
+	return result;
+}
+EXPORT_SYMBOL(__ip_dev_find);
+
 static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
 
 static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
@@ -265,6 +331,7 @@
 			}
 
 			if (!do_promote) {
+				inet_hash_remove(ifa);
 				*ifap1 = ifa->ifa_next;
 
 				rtmsg_ifa(RTM_DELADDR, ifa, nlh, pid);
@@ -281,6 +348,7 @@
 	/* 2. Unlink it */
 
 	*ifap = ifa1->ifa_next;
+	inet_hash_remove(ifa1);
 
 	/* 3. Announce address deletion */
 
@@ -368,6 +436,8 @@
 	ifa->ifa_next = *ifap;
 	*ifap = ifa;
 
+	inet_hash_insert(dev_net(in_dev->dev), ifa);
+
 	/* Send message first, then call notifier.
 	   Notifier will trigger FIB update, so that
 	   listeners of netlink will know about new ifaddr */
@@ -521,6 +591,7 @@
 	if (tb[IFA_ADDRESS] == NULL)
 		tb[IFA_ADDRESS] = tb[IFA_LOCAL];
 
+	INIT_HLIST_NODE(&ifa->hash);
 	ifa->ifa_prefixlen = ifm->ifa_prefixlen;
 	ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
 	ifa->ifa_flags = ifm->ifa_flags;
@@ -728,6 +799,7 @@
 		if (!ifa) {
 			ret = -ENOBUFS;
 			ifa = inet_alloc_ifa();
+			INIT_HLIST_NODE(&ifa->hash);
 			if (!ifa)
 				break;
 			if (colon)
@@ -1030,6 +1102,21 @@
 	return mtu >= 68;
 }
 
+static void inetdev_send_gratuitous_arp(struct net_device *dev,
+					struct in_device *in_dev)
+
+{
+	struct in_ifaddr *ifa = in_dev->ifa_list;
+
+	if (!ifa)
+		return;
+
+	arp_send(ARPOP_REQUEST, ETH_P_ARP,
+		 ifa->ifa_address, dev,
+		 ifa->ifa_address, NULL,
+		 dev->dev_addr, NULL);
+}
+
 /* Called only under RTNL semaphore */
 
 static int inetdev_event(struct notifier_block *this, unsigned long event,
@@ -1069,6 +1156,7 @@
 			struct in_ifaddr *ifa = inet_alloc_ifa();
 
 			if (ifa) {
+				INIT_HLIST_NODE(&ifa->hash);
 				ifa->ifa_local =
 				  ifa->ifa_address = htonl(INADDR_LOOPBACK);
 				ifa->ifa_prefixlen = 8;
@@ -1082,18 +1170,13 @@
 		}
 		ip_mc_up(in_dev);
 		/* fall through */
-	case NETDEV_NOTIFY_PEERS:
 	case NETDEV_CHANGEADDR:
+		if (!IN_DEV_ARP_NOTIFY(in_dev))
+			break;
+		/* fall through */
+	case NETDEV_NOTIFY_PEERS:
 		/* Send gratuitous ARP to notify of link change */
-		if (IN_DEV_ARP_NOTIFY(in_dev)) {
-			struct in_ifaddr *ifa = in_dev->ifa_list;
-
-			if (ifa)
-				arp_send(ARPOP_REQUEST, ETH_P_ARP,
-					 ifa->ifa_address, dev,
-					 ifa->ifa_address, NULL,
-					 dev->dev_addr, NULL);
-		}
+		inetdev_send_gratuitous_arp(dev, in_dev);
 		break;
 	case NETDEV_DOWN:
 		ip_mc_down(in_dev);
@@ -1710,6 +1793,11 @@
 
 void __init devinet_init(void)
 {
+	int i;
+
+	for (i = 0; i < IN4_ADDR_HSIZE; i++)
+		INIT_HLIST_HEAD(&inet_addr_lst[i]);
+
 	register_pernet_subsys(&devinet_ops);
 
 	register_gifconf(PF_INET, inet_gifconf);
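
To see what the new devinet.c hash buys, note that inet_addr_hash() folds the four
address bytes into an 8-bit bucket index and mixes in a hash of the namespace
pointer, so __ip_dev_find() can map a source address to its device without a FIB
lookup. A minimal userspace sketch of the same fold, with the namespace mixing
omitted (names are illustrative, not code from this patch):

#include <stdint.h>
#include <stdio.h>

#define IN4_ADDR_HSIZE 256	/* must stay a power of two for the mask below */

/* Fold all four bytes of an IPv4 address into an 8-bit bucket index,
 * in the spirit of inet_addr_hash() (namespace mixing omitted). */
static unsigned int addr_bucket(uint32_t addr)
{
	return (addr ^ (addr >> 8) ^ (addr >> 16) ^ (addr >> 24)) &
	       (IN4_ADDR_HSIZE - 1);
}

int main(void)
{
	printf("bucket for 192.0.2.1 = %u\n", addr_bucket(0xC0000201u));
	return 0;
}

The power-of-two mask is also why the comment above inet_addr_hash() warns that
changing IN4_ADDR_HSIZE means revisiting the hash function as well.
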
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 1d2cdd4..ad0778a 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -51,11 +51,11 @@
 {
 	struct fib_table *local_table, *main_table;
 
-	local_table = fib_hash_table(RT_TABLE_LOCAL);
+	local_table = fib_trie_table(RT_TABLE_LOCAL);
 	if (local_table == NULL)
 		return -ENOMEM;
 
-	main_table  = fib_hash_table(RT_TABLE_MAIN);
+	main_table  = fib_trie_table(RT_TABLE_MAIN);
 	if (main_table == NULL)
 		goto fail;
 
@@ -82,7 +82,7 @@
 	if (tb)
 		return tb;
 
-	tb = fib_hash_table(id);
+	tb = fib_trie_table(id);
 	if (!tb)
 		return NULL;
 	h = id & (FIB_TABLE_HASHSZ - 1);
@@ -114,21 +114,6 @@
 }
 #endif /* CONFIG_IP_MULTIPLE_TABLES */
 
-void fib_select_default(struct net *net,
-			const struct flowi *flp, struct fib_result *res)
-{
-	struct fib_table *tb;
-	int table = RT_TABLE_MAIN;
-#ifdef CONFIG_IP_MULTIPLE_TABLES
-	if (res->r == NULL || res->r->action != FR_ACT_TO_TBL)
-		return;
-	table = res->r->table;
-#endif
-	tb = fib_get_table(net, table);
-	if (FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
-		fib_table_select_default(tb, flp, res);
-}
-
 static void fib_flush(struct net *net)
 {
 	int flushed = 0;
@@ -147,46 +132,6 @@
 		rt_cache_flush(net, -1);
 }
 
-/**
- * __ip_dev_find - find the first device with a given source address.
- * @net: the net namespace
- * @addr: the source address
- * @devref: if true, take a reference on the found device
- *
- * If a caller uses devref=false, it should be protected by RCU, or RTNL
- */
-struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
-{
-	struct flowi fl = {
-		.fl4_dst = addr,
-	};
-	struct fib_result res = { 0 };
-	struct net_device *dev = NULL;
-	struct fib_table *local_table;
-
-#ifdef CONFIG_IP_MULTIPLE_TABLES
-	res.r = NULL;
-#endif
-
-	rcu_read_lock();
-	local_table = fib_get_table(net, RT_TABLE_LOCAL);
-	if (!local_table ||
-	    fib_table_lookup(local_table, &fl, &res, FIB_LOOKUP_NOREF)) {
-		rcu_read_unlock();
-		return NULL;
-	}
-	if (res.type != RTN_LOCAL)
-		goto out;
-	dev = FIB_RES_DEV(res);
-
-	if (dev && devref)
-		dev_hold(dev);
-out:
-	rcu_read_unlock();
-	return dev;
-}
-EXPORT_SYMBOL(__ip_dev_find);
-
 /*
  * Find address type as if only "dev" was present in the system. If
  * on_dev is NULL then all interfaces are taken into consideration.
@@ -1101,5 +1046,5 @@
 	register_netdevice_notifier(&fib_netdev_notifier);
 	register_inetaddr_notifier(&fib_inetaddr_notifier);
 
-	fib_hash_init();
+	fib_trie_init();
 }
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
deleted file mode 100644
index b3acb04..0000000
--- a/net/ipv4/fib_hash.c
+++ /dev/null
@@ -1,1133 +0,0 @@
-/*
- * INET		An implementation of the TCP/IP protocol suite for the LINUX
- *		operating system.  INET is implemented using the  BSD Socket
- *		interface as the means of communication with the user level.
- *
- *		IPv4 FIB: lookup engine and maintenance routines.
- *
- * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- *
- *		This program is free software; you can redistribute it and/or
- *		modify it under the terms of the GNU General Public License
- *		as published by the Free Software Foundation; either version
- *		2 of the License, or (at your option) any later version.
- */
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/errno.h>
-#include <linux/in.h>
-#include <linux/inet.h>
-#include <linux/inetdevice.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/proc_fs.h>
-#include <linux/skbuff.h>
-#include <linux/netlink.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-#include <net/net_namespace.h>
-#include <net/ip.h>
-#include <net/protocol.h>
-#include <net/route.h>
-#include <net/tcp.h>
-#include <net/sock.h>
-#include <net/ip_fib.h>
-
-#include "fib_lookup.h"
-
-static struct kmem_cache *fn_hash_kmem __read_mostly;
-static struct kmem_cache *fn_alias_kmem __read_mostly;
-
-struct fib_node {
-	struct hlist_node	fn_hash;
-	struct list_head	fn_alias;
-	__be32			fn_key;
-	struct fib_alias        fn_embedded_alias;
-};
-
-#define EMBEDDED_HASH_SIZE (L1_CACHE_BYTES / sizeof(struct hlist_head))
-
-struct fn_zone {
-	struct fn_zone __rcu	*fz_next;	/* Next not empty zone	*/
-	struct hlist_head __rcu	*fz_hash;	/* Hash table pointer	*/
-	seqlock_t		fz_lock;
-	u32			fz_hashmask;	/* (fz_divisor - 1)	*/
-
-	u8			fz_order;	/* Zone order (0..32)	*/
-	u8			fz_revorder;	/* 32 - fz_order	*/
-	__be32			fz_mask;	/* inet_make_mask(order) */
-#define FZ_MASK(fz)		((fz)->fz_mask)
-
-	struct hlist_head	fz_embedded_hash[EMBEDDED_HASH_SIZE];
-
-	int			fz_nent;	/* Number of entries	*/
-	int			fz_divisor;	/* Hash size (mask+1)	*/
-};
-
-struct fn_hash {
-	struct fn_zone		*fn_zones[33];
-	struct fn_zone __rcu	*fn_zone_list;
-};
-
-static inline u32 fn_hash(__be32 key, struct fn_zone *fz)
-{
-	u32 h = ntohl(key) >> fz->fz_revorder;
-	h ^= (h>>20);
-	h ^= (h>>10);
-	h ^= (h>>5);
-	h &= fz->fz_hashmask;
-	return h;
-}
-
-static inline __be32 fz_key(__be32 dst, struct fn_zone *fz)
-{
-	return dst & FZ_MASK(fz);
-}
-
-static unsigned int fib_hash_genid;
-
-#define FZ_MAX_DIVISOR ((PAGE_SIZE<<MAX_ORDER) / sizeof(struct hlist_head))
-
-static struct hlist_head *fz_hash_alloc(int divisor)
-{
-	unsigned long size = divisor * sizeof(struct hlist_head);
-
-	if (size <= PAGE_SIZE)
-		return kzalloc(size, GFP_KERNEL);
-
-	return (struct hlist_head *)
-		__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(size));
-}
-
-/* The fib hash lock must be held when this is called. */
-static inline void fn_rebuild_zone(struct fn_zone *fz,
-				   struct hlist_head *old_ht,
-				   int old_divisor)
-{
-	int i;
-
-	for (i = 0; i < old_divisor; i++) {
-		struct hlist_node *node, *n;
-		struct fib_node *f;
-
-		hlist_for_each_entry_safe(f, node, n, &old_ht[i], fn_hash) {
-			struct hlist_head *new_head;
-
-			hlist_del_rcu(&f->fn_hash);
-
-			new_head = rcu_dereference_protected(fz->fz_hash, 1) +
-				   fn_hash(f->fn_key, fz);
-			hlist_add_head_rcu(&f->fn_hash, new_head);
-		}
-	}
-}
-
-static void fz_hash_free(struct hlist_head *hash, int divisor)
-{
-	unsigned long size = divisor * sizeof(struct hlist_head);
-
-	if (size <= PAGE_SIZE)
-		kfree(hash);
-	else
-		free_pages((unsigned long)hash, get_order(size));
-}
-
-static void fn_rehash_zone(struct fn_zone *fz)
-{
-	struct hlist_head *ht, *old_ht;
-	int old_divisor, new_divisor;
-	u32 new_hashmask;
-
-	new_divisor = old_divisor = fz->fz_divisor;
-
-	switch (old_divisor) {
-	case EMBEDDED_HASH_SIZE:
-		new_divisor *= EMBEDDED_HASH_SIZE;
-		break;
-	case EMBEDDED_HASH_SIZE*EMBEDDED_HASH_SIZE:
-		new_divisor *= (EMBEDDED_HASH_SIZE/2);
-		break;
-	default:
-		if ((old_divisor << 1) > FZ_MAX_DIVISOR) {
-			printk(KERN_CRIT "route.c: bad divisor %d!\n", old_divisor);
-			return;
-		}
-		new_divisor = (old_divisor << 1);
-		break;
-	}
-
-	new_hashmask = (new_divisor - 1);
-
-#if RT_CACHE_DEBUG >= 2
-	printk(KERN_DEBUG "fn_rehash_zone: hash for zone %d grows from %d\n",
-	       fz->fz_order, old_divisor);
-#endif
-
-	ht = fz_hash_alloc(new_divisor);
-
-	if (ht)	{
-		struct fn_zone nfz;
-
-		memcpy(&nfz, fz, sizeof(nfz));
-
-		write_seqlock_bh(&fz->fz_lock);
-		old_ht = rcu_dereference_protected(fz->fz_hash, 1);
-		RCU_INIT_POINTER(nfz.fz_hash, ht);
-		nfz.fz_hashmask = new_hashmask;
-		nfz.fz_divisor = new_divisor;
-		fn_rebuild_zone(&nfz, old_ht, old_divisor);
-		fib_hash_genid++;
-		rcu_assign_pointer(fz->fz_hash, ht);
-		fz->fz_hashmask = new_hashmask;
-		fz->fz_divisor = new_divisor;
-		write_sequnlock_bh(&fz->fz_lock);
-
-		if (old_ht != fz->fz_embedded_hash) {
-			synchronize_rcu();
-			fz_hash_free(old_ht, old_divisor);
-		}
-	}
-}
-
-static void fn_free_node_rcu(struct rcu_head *head)
-{
-	struct fib_node *f = container_of(head, struct fib_node, fn_embedded_alias.rcu);
-
-	kmem_cache_free(fn_hash_kmem, f);
-}
-
-static inline void fn_free_node(struct fib_node *f)
-{
-	call_rcu(&f->fn_embedded_alias.rcu, fn_free_node_rcu);
-}
-
-static void fn_free_alias_rcu(struct rcu_head *head)
-{
-	struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
-
-	kmem_cache_free(fn_alias_kmem, fa);
-}
-
-static inline void fn_free_alias(struct fib_alias *fa, struct fib_node *f)
-{
-	fib_release_info(fa->fa_info);
-	if (fa == &f->fn_embedded_alias)
-		fa->fa_info = NULL;
-	else
-		call_rcu(&fa->rcu, fn_free_alias_rcu);
-}
-
-static struct fn_zone *
-fn_new_zone(struct fn_hash *table, int z)
-{
-	int i;
-	struct fn_zone *fz = kzalloc(sizeof(struct fn_zone), GFP_KERNEL);
-	if (!fz)
-		return NULL;
-
-	seqlock_init(&fz->fz_lock);
-	fz->fz_divisor = z ? EMBEDDED_HASH_SIZE : 1;
-	fz->fz_hashmask = fz->fz_divisor - 1;
-	RCU_INIT_POINTER(fz->fz_hash, fz->fz_embedded_hash);
-	fz->fz_order = z;
-	fz->fz_revorder = 32 - z;
-	fz->fz_mask = inet_make_mask(z);
-
-	/* Find the first not empty zone with more specific mask */
-	for (i = z + 1; i <= 32; i++)
-		if (table->fn_zones[i])
-			break;
-	if (i > 32) {
-		/* No more specific masks, we are the first. */
-		rcu_assign_pointer(fz->fz_next,
-				   rtnl_dereference(table->fn_zone_list));
-		rcu_assign_pointer(table->fn_zone_list, fz);
-	} else {
-		rcu_assign_pointer(fz->fz_next,
-				   rtnl_dereference(table->fn_zones[i]->fz_next));
-		rcu_assign_pointer(table->fn_zones[i]->fz_next, fz);
-	}
-	table->fn_zones[z] = fz;
-	fib_hash_genid++;
-	return fz;
-}
-
-int fib_table_lookup(struct fib_table *tb,
-		     const struct flowi *flp, struct fib_result *res,
-		     int fib_flags)
-{
-	int err;
-	struct fn_zone *fz;
-	struct fn_hash *t = (struct fn_hash *)tb->tb_data;
-
-	rcu_read_lock();
-	for (fz = rcu_dereference(t->fn_zone_list);
-	     fz != NULL;
-	     fz = rcu_dereference(fz->fz_next)) {
-		struct hlist_head *head;
-		struct hlist_node *node;
-		struct fib_node *f;
-		__be32 k;
-		unsigned int seq;
-
-		do {
-			seq = read_seqbegin(&fz->fz_lock);
-			k = fz_key(flp->fl4_dst, fz);
-
-			head = rcu_dereference(fz->fz_hash) + fn_hash(k, fz);
-			hlist_for_each_entry_rcu(f, node, head, fn_hash) {
-				if (f->fn_key != k)
-					continue;
-
-				err = fib_semantic_match(&f->fn_alias,
-						 flp, res,
-						 fz->fz_order, fib_flags);
-				if (err <= 0)
-					goto out;
-			}
-		} while (read_seqretry(&fz->fz_lock, seq));
-	}
-	err = 1;
-out:
-	rcu_read_unlock();
-	return err;
-}
-
-void fib_table_select_default(struct fib_table *tb,
-			      const struct flowi *flp, struct fib_result *res)
-{
-	int order, last_idx;
-	struct hlist_node *node;
-	struct fib_node *f;
-	struct fib_info *fi = NULL;
-	struct fib_info *last_resort;
-	struct fn_hash *t = (struct fn_hash *)tb->tb_data;
-	struct fn_zone *fz = t->fn_zones[0];
-	struct hlist_head *head;
-
-	if (fz == NULL)
-		return;
-
-	last_idx = -1;
-	last_resort = NULL;
-	order = -1;
-
-	rcu_read_lock();
-	head = rcu_dereference(fz->fz_hash);
-	hlist_for_each_entry_rcu(f, node, head, fn_hash) {
-		struct fib_alias *fa;
-
-		list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
-			struct fib_info *next_fi = fa->fa_info;
-
-			if (fa->fa_scope != res->scope ||
-			    fa->fa_type != RTN_UNICAST)
-				continue;
-
-			if (next_fi->fib_priority > res->fi->fib_priority)
-				break;
-			if (!next_fi->fib_nh[0].nh_gw ||
-			    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
-				continue;
-
-			fib_alias_accessed(fa);
-
-			if (fi == NULL) {
-				if (next_fi != res->fi)
-					break;
-			} else if (!fib_detect_death(fi, order, &last_resort,
-						&last_idx, tb->tb_default)) {
-				fib_result_assign(res, fi);
-				tb->tb_default = order;
-				goto out;
-			}
-			fi = next_fi;
-			order++;
-		}
-	}
-
-	if (order <= 0 || fi == NULL) {
-		tb->tb_default = -1;
-		goto out;
-	}
-
-	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
-				tb->tb_default)) {
-		fib_result_assign(res, fi);
-		tb->tb_default = order;
-		goto out;
-	}
-
-	if (last_idx >= 0)
-		fib_result_assign(res, last_resort);
-	tb->tb_default = last_idx;
-out:
-	rcu_read_unlock();
-}
-
-/* Insert node F to FZ. */
-static inline void fib_insert_node(struct fn_zone *fz, struct fib_node *f)
-{
-	struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(f->fn_key, fz);
-
-	hlist_add_head_rcu(&f->fn_hash, head);
-}
-
-/* Return the node in FZ matching KEY. */
-static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key)
-{
-	struct hlist_head *head = rtnl_dereference(fz->fz_hash) + fn_hash(key, fz);
-	struct hlist_node *node;
-	struct fib_node *f;
-
-	hlist_for_each_entry_rcu(f, node, head, fn_hash) {
-		if (f->fn_key == key)
-			return f;
-	}
-
-	return NULL;
-}
-
-
-static struct fib_alias *fib_fast_alloc(struct fib_node *f)
-{
-	struct fib_alias *fa = &f->fn_embedded_alias;
-
-	if (fa->fa_info != NULL)
-		fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL);
-	return fa;
-}
-
-/* Caller must hold RTNL. */
-int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
-{
-	struct fn_hash *table = (struct fn_hash *) tb->tb_data;
-	struct fib_node *new_f = NULL;
-	struct fib_node *f;
-	struct fib_alias *fa, *new_fa;
-	struct fn_zone *fz;
-	struct fib_info *fi;
-	u8 tos = cfg->fc_tos;
-	__be32 key;
-	int err;
-
-	if (cfg->fc_dst_len > 32)
-		return -EINVAL;
-
-	fz = table->fn_zones[cfg->fc_dst_len];
-	if (!fz && !(fz = fn_new_zone(table, cfg->fc_dst_len)))
-		return -ENOBUFS;
-
-	key = 0;
-	if (cfg->fc_dst) {
-		if (cfg->fc_dst & ~FZ_MASK(fz))
-			return -EINVAL;
-		key = fz_key(cfg->fc_dst, fz);
-	}
-
-	fi = fib_create_info(cfg);
-	if (IS_ERR(fi))
-		return PTR_ERR(fi);
-
-	if (fz->fz_nent > (fz->fz_divisor<<1) &&
-	    fz->fz_divisor < FZ_MAX_DIVISOR &&
-	    (cfg->fc_dst_len == 32 ||
-	     (1 << cfg->fc_dst_len) > fz->fz_divisor))
-		fn_rehash_zone(fz);
-
-	f = fib_find_node(fz, key);
-
-	if (!f)
-		fa = NULL;
-	else
-		fa = fib_find_alias(&f->fn_alias, tos, fi->fib_priority);
-
-	/* Now fa, if non-NULL, points to the first fib alias
-	 * with the same keys [prefix,tos,priority], if such key already
-	 * exists or to the node before which we will insert new one.
-	 *
-	 * If fa is NULL, we will need to allocate a new one and
-	 * insert to the head of f.
-	 *
-	 * If f is NULL, no fib node matched the destination key
-	 * and we need to allocate a new one of those as well.
-	 */
-
-	if (fa && fa->fa_tos == tos &&
-	    fa->fa_info->fib_priority == fi->fib_priority) {
-		struct fib_alias *fa_first, *fa_match;
-
-		err = -EEXIST;
-		if (cfg->fc_nlflags & NLM_F_EXCL)
-			goto out;
-
-		/* We have 2 goals:
-		 * 1. Find exact match for type, scope, fib_info to avoid
-		 * duplicate routes
-		 * 2. Find next 'fa' (or head), NLM_F_APPEND inserts before it
-		 */
-		fa_match = NULL;
-		fa_first = fa;
-		fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
-		list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
-			if (fa->fa_tos != tos)
-				break;
-			if (fa->fa_info->fib_priority != fi->fib_priority)
-				break;
-			if (fa->fa_type == cfg->fc_type &&
-			    fa->fa_scope == cfg->fc_scope &&
-			    fa->fa_info == fi) {
-				fa_match = fa;
-				break;
-			}
-		}
-
-		if (cfg->fc_nlflags & NLM_F_REPLACE) {
-			u8 state;
-
-			fa = fa_first;
-			if (fa_match) {
-				if (fa == fa_match)
-					err = 0;
-				goto out;
-			}
-			err = -ENOBUFS;
-			new_fa = fib_fast_alloc(f);
-			if (new_fa == NULL)
-				goto out;
-
-			new_fa->fa_tos = fa->fa_tos;
-			new_fa->fa_info = fi;
-			new_fa->fa_type = cfg->fc_type;
-			new_fa->fa_scope = cfg->fc_scope;
-			state = fa->fa_state;
-			new_fa->fa_state = state & ~FA_S_ACCESSED;
-			fib_hash_genid++;
-			list_replace_rcu(&fa->fa_list, &new_fa->fa_list);
-
-			fn_free_alias(fa, f);
-			if (state & FA_S_ACCESSED)
-				rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
-			rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len,
-				  tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE);
-			return 0;
-		}
-
-		/* Error if we find a perfect match which
-		 * uses the same scope, type, and nexthop
-		 * information.
-		 */
-		if (fa_match)
-			goto out;
-
-		if (!(cfg->fc_nlflags & NLM_F_APPEND))
-			fa = fa_first;
-	}
-
-	err = -ENOENT;
-	if (!(cfg->fc_nlflags & NLM_F_CREATE))
-		goto out;
-
-	err = -ENOBUFS;
-
-	if (!f) {
-		new_f = kmem_cache_zalloc(fn_hash_kmem, GFP_KERNEL);
-		if (new_f == NULL)
-			goto out;
-
-		INIT_HLIST_NODE(&new_f->fn_hash);
-		INIT_LIST_HEAD(&new_f->fn_alias);
-		new_f->fn_key = key;
-		f = new_f;
-	}
-
-	new_fa = fib_fast_alloc(f);
-	if (new_fa == NULL)
-		goto out;
-
-	new_fa->fa_info = fi;
-	new_fa->fa_tos = tos;
-	new_fa->fa_type = cfg->fc_type;
-	new_fa->fa_scope = cfg->fc_scope;
-	new_fa->fa_state = 0;
-
-	/*
-	 * Insert new entry to the list.
-	 */
-
-	if (new_f)
-		fib_insert_node(fz, new_f);
-	list_add_tail_rcu(&new_fa->fa_list,
-		 (fa ? &fa->fa_list : &f->fn_alias));
-	fib_hash_genid++;
-
-	if (new_f)
-		fz->fz_nent++;
-	rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
-
-	rtmsg_fib(RTM_NEWROUTE, key, new_fa, cfg->fc_dst_len, tb->tb_id,
-		  &cfg->fc_nlinfo, 0);
-	return 0;
-
-out:
-	if (new_f)
-		kmem_cache_free(fn_hash_kmem, new_f);
-	fib_release_info(fi);
-	return err;
-}
-
-int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
-{
-	struct fn_hash *table = (struct fn_hash *)tb->tb_data;
-	struct fib_node *f;
-	struct fib_alias *fa, *fa_to_delete;
-	struct fn_zone *fz;
-	__be32 key;
-
-	if (cfg->fc_dst_len > 32)
-		return -EINVAL;
-
-	if ((fz  = table->fn_zones[cfg->fc_dst_len]) == NULL)
-		return -ESRCH;
-
-	key = 0;
-	if (cfg->fc_dst) {
-		if (cfg->fc_dst & ~FZ_MASK(fz))
-			return -EINVAL;
-		key = fz_key(cfg->fc_dst, fz);
-	}
-
-	f = fib_find_node(fz, key);
-
-	if (!f)
-		fa = NULL;
-	else
-		fa = fib_find_alias(&f->fn_alias, cfg->fc_tos, 0);
-	if (!fa)
-		return -ESRCH;
-
-	fa_to_delete = NULL;
-	fa = list_entry(fa->fa_list.prev, struct fib_alias, fa_list);
-	list_for_each_entry_continue(fa, &f->fn_alias, fa_list) {
-		struct fib_info *fi = fa->fa_info;
-
-		if (fa->fa_tos != cfg->fc_tos)
-			break;
-
-		if ((!cfg->fc_type ||
-		     fa->fa_type == cfg->fc_type) &&
-		    (cfg->fc_scope == RT_SCOPE_NOWHERE ||
-		     fa->fa_scope == cfg->fc_scope) &&
-		    (!cfg->fc_protocol ||
-		     fi->fib_protocol == cfg->fc_protocol) &&
-		    fib_nh_match(cfg, fi) == 0) {
-			fa_to_delete = fa;
-			break;
-		}
-	}
-
-	if (fa_to_delete) {
-		int kill_fn;
-
-		fa = fa_to_delete;
-		rtmsg_fib(RTM_DELROUTE, key, fa, cfg->fc_dst_len,
-			  tb->tb_id, &cfg->fc_nlinfo, 0);
-
-		kill_fn = 0;
-		list_del_rcu(&fa->fa_list);
-		if (list_empty(&f->fn_alias)) {
-			hlist_del_rcu(&f->fn_hash);
-			kill_fn = 1;
-		}
-		fib_hash_genid++;
-
-		if (fa->fa_state & FA_S_ACCESSED)
-			rt_cache_flush(cfg->fc_nlinfo.nl_net, -1);
-		fn_free_alias(fa, f);
-		if (kill_fn) {
-			fn_free_node(f);
-			fz->fz_nent--;
-		}
-
-		return 0;
-	}
-	return -ESRCH;
-}
-
-static int fn_flush_list(struct fn_zone *fz, int idx)
-{
-	struct hlist_head *head = rtnl_dereference(fz->fz_hash) + idx;
-	struct hlist_node *node, *n;
-	struct fib_node *f;
-	int found = 0;
-
-	hlist_for_each_entry_safe(f, node, n, head, fn_hash) {
-		struct fib_alias *fa, *fa_node;
-		int kill_f;
-
-		kill_f = 0;
-		list_for_each_entry_safe(fa, fa_node, &f->fn_alias, fa_list) {
-			struct fib_info *fi = fa->fa_info;
-
-			if (fi && (fi->fib_flags&RTNH_F_DEAD)) {
-				list_del_rcu(&fa->fa_list);
-				if (list_empty(&f->fn_alias)) {
-					hlist_del_rcu(&f->fn_hash);
-					kill_f = 1;
-				}
-				fib_hash_genid++;
-
-				fn_free_alias(fa, f);
-				found++;
-			}
-		}
-		if (kill_f) {
-			fn_free_node(f);
-			fz->fz_nent--;
-		}
-	}
-	return found;
-}
-
-/* caller must hold RTNL. */
-int fib_table_flush(struct fib_table *tb)
-{
-	struct fn_hash *table = (struct fn_hash *) tb->tb_data;
-	struct fn_zone *fz;
-	int found = 0;
-
-	for (fz = rtnl_dereference(table->fn_zone_list);
-	     fz != NULL;
-	     fz = rtnl_dereference(fz->fz_next)) {
-		int i;
-
-		for (i = fz->fz_divisor - 1; i >= 0; i--)
-			found += fn_flush_list(fz, i);
-	}
-	return found;
-}
-
-void fib_free_table(struct fib_table *tb)
-{
-	struct fn_hash *table = (struct fn_hash *) tb->tb_data;
-	struct fn_zone *fz, *next;
-
-	next = table->fn_zone_list;
-	while (next != NULL) {
-		fz = next;
-		next = fz->fz_next;
-
-		if (fz->fz_hash != fz->fz_embedded_hash)
-			fz_hash_free(fz->fz_hash, fz->fz_divisor);
-
-		kfree(fz);
-	}
-
-	kfree(tb);
-}
-
-static inline int
-fn_hash_dump_bucket(struct sk_buff *skb, struct netlink_callback *cb,
-		     struct fib_table *tb,
-		     struct fn_zone *fz,
-		     struct hlist_head *head)
-{
-	struct hlist_node *node;
-	struct fib_node *f;
-	int i, s_i;
-
-	s_i = cb->args[4];
-	i = 0;
-	hlist_for_each_entry_rcu(f, node, head, fn_hash) {
-		struct fib_alias *fa;
-
-		list_for_each_entry_rcu(fa, &f->fn_alias, fa_list) {
-			if (i < s_i)
-				goto next;
-
-			if (fib_dump_info(skb, NETLINK_CB(cb->skb).pid,
-					  cb->nlh->nlmsg_seq,
-					  RTM_NEWROUTE,
-					  tb->tb_id,
-					  fa->fa_type,
-					  fa->fa_scope,
-					  f->fn_key,
-					  fz->fz_order,
-					  fa->fa_tos,
-					  fa->fa_info,
-					  NLM_F_MULTI) < 0) {
-				cb->args[4] = i;
-				return -1;
-			}
-next:
-			i++;
-		}
-	}
-	cb->args[4] = i;
-	return skb->len;
-}
-
-static inline int
-fn_hash_dump_zone(struct sk_buff *skb, struct netlink_callback *cb,
-		   struct fib_table *tb,
-		   struct fn_zone *fz)
-{
-	int h, s_h;
-	struct hlist_head *head = rcu_dereference(fz->fz_hash);
-
-	if (head == NULL)
-		return skb->len;
-	s_h = cb->args[3];
-	for (h = s_h; h < fz->fz_divisor; h++) {
-		if (hlist_empty(head + h))
-			continue;
-		if (fn_hash_dump_bucket(skb, cb, tb, fz, head + h) < 0) {
-			cb->args[3] = h;
-			return -1;
-		}
-		memset(&cb->args[4], 0,
-		       sizeof(cb->args) - 4*sizeof(cb->args[0]));
-	}
-	cb->args[3] = h;
-	return skb->len;
-}
-
-int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
-		   struct netlink_callback *cb)
-{
-	int m = 0, s_m;
-	struct fn_zone *fz;
-	struct fn_hash *table = (struct fn_hash *)tb->tb_data;
-
-	s_m = cb->args[2];
-	rcu_read_lock();
-	for (fz = rcu_dereference(table->fn_zone_list);
-	     fz != NULL;
-	     fz = rcu_dereference(fz->fz_next), m++) {
-		if (m < s_m)
-			continue;
-		if (fn_hash_dump_zone(skb, cb, tb, fz) < 0) {
-			cb->args[2] = m;
-			rcu_read_unlock();
-			return -1;
-		}
-		memset(&cb->args[3], 0,
-		       sizeof(cb->args) - 3*sizeof(cb->args[0]));
-	}
-	rcu_read_unlock();
-	cb->args[2] = m;
-	return skb->len;
-}
-
-void __init fib_hash_init(void)
-{
-	fn_hash_kmem = kmem_cache_create("ip_fib_hash", sizeof(struct fib_node),
-					 0, SLAB_PANIC, NULL);
-
-	fn_alias_kmem = kmem_cache_create("ip_fib_alias", sizeof(struct fib_alias),
-					  0, SLAB_PANIC, NULL);
-
-}
-
-struct fib_table *fib_hash_table(u32 id)
-{
-	struct fib_table *tb;
-
-	tb = kmalloc(sizeof(struct fib_table) + sizeof(struct fn_hash),
-		     GFP_KERNEL);
-	if (tb == NULL)
-		return NULL;
-
-	tb->tb_id = id;
-	tb->tb_default = -1;
-
-	memset(tb->tb_data, 0, sizeof(struct fn_hash));
-	return tb;
-}
-
-/* ------------------------------------------------------------------------ */
-#ifdef CONFIG_PROC_FS
-
-struct fib_iter_state {
-	struct seq_net_private p;
-	struct fn_zone	*zone;
-	int		bucket;
-	struct hlist_head *hash_head;
-	struct fib_node *fn;
-	struct fib_alias *fa;
-	loff_t pos;
-	unsigned int genid;
-	int valid;
-};
-
-static struct fib_alias *fib_get_first(struct seq_file *seq)
-{
-	struct fib_iter_state *iter = seq->private;
-	struct fib_table *main_table;
-	struct fn_hash *table;
-
-	main_table = fib_get_table(seq_file_net(seq), RT_TABLE_MAIN);
-	table = (struct fn_hash *)main_table->tb_data;
-
-	iter->bucket    = 0;
-	iter->hash_head = NULL;
-	iter->fn        = NULL;
-	iter->fa        = NULL;
-	iter->pos	= 0;
-	iter->genid	= fib_hash_genid;
-	iter->valid	= 1;
-
-	for (iter->zone = rcu_dereference(table->fn_zone_list);
-	     iter->zone != NULL;
-	     iter->zone = rcu_dereference(iter->zone->fz_next)) {
-		int maxslot;
-
-		if (!iter->zone->fz_nent)
-			continue;
-
-		iter->hash_head = rcu_dereference(iter->zone->fz_hash);
-		maxslot = iter->zone->fz_divisor;
-
-		for (iter->bucket = 0; iter->bucket < maxslot;
-		     ++iter->bucket, ++iter->hash_head) {
-			struct hlist_node *node;
-			struct fib_node *fn;
-
-			hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
-				struct fib_alias *fa;
-
-				list_for_each_entry(fa, &fn->fn_alias, fa_list) {
-					iter->fn = fn;
-					iter->fa = fa;
-					goto out;
-				}
-			}
-		}
-	}
-out:
-	return iter->fa;
-}
-
-static struct fib_alias *fib_get_next(struct seq_file *seq)
-{
-	struct fib_iter_state *iter = seq->private;
-	struct fib_node *fn;
-	struct fib_alias *fa;
-
-	/* Advance FA, if any. */
-	fn = iter->fn;
-	fa = iter->fa;
-	if (fa) {
-		BUG_ON(!fn);
-		list_for_each_entry_continue(fa, &fn->fn_alias, fa_list) {
-			iter->fa = fa;
-			goto out;
-		}
-	}
-
-	fa = iter->fa = NULL;
-
-	/* Advance FN. */
-	if (fn) {
-		struct hlist_node *node = &fn->fn_hash;
-		hlist_for_each_entry_continue(fn, node, fn_hash) {
-			iter->fn = fn;
-
-			list_for_each_entry(fa, &fn->fn_alias, fa_list) {
-				iter->fa = fa;
-				goto out;
-			}
-		}
-	}
-
-	fn = iter->fn = NULL;
-
-	/* Advance hash chain. */
-	if (!iter->zone)
-		goto out;
-
-	for (;;) {
-		struct hlist_node *node;
-		int maxslot;
-
-		maxslot = iter->zone->fz_divisor;
-
-		while (++iter->bucket < maxslot) {
-			iter->hash_head++;
-
-			hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
-				list_for_each_entry(fa, &fn->fn_alias, fa_list) {
-					iter->fn = fn;
-					iter->fa = fa;
-					goto out;
-				}
-			}
-		}
-
-		iter->zone = rcu_dereference(iter->zone->fz_next);
-
-		if (!iter->zone)
-			goto out;
-
-		iter->bucket = 0;
-		iter->hash_head = rcu_dereference(iter->zone->fz_hash);
-
-		hlist_for_each_entry(fn, node, iter->hash_head, fn_hash) {
-			list_for_each_entry(fa, &fn->fn_alias, fa_list) {
-				iter->fn = fn;
-				iter->fa = fa;
-				goto out;
-			}
-		}
-	}
-out:
-	iter->pos++;
-	return fa;
-}
-
-static struct fib_alias *fib_get_idx(struct seq_file *seq, loff_t pos)
-{
-	struct fib_iter_state *iter = seq->private;
-	struct fib_alias *fa;
-
-	if (iter->valid && pos >= iter->pos && iter->genid == fib_hash_genid) {
-		fa   = iter->fa;
-		pos -= iter->pos;
-	} else
-		fa = fib_get_first(seq);
-
-	if (fa)
-		while (pos && (fa = fib_get_next(seq)))
-			--pos;
-	return pos ? NULL : fa;
-}
-
-static void *fib_seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(RCU)
-{
-	void *v = NULL;
-
-	rcu_read_lock();
-	if (fib_get_table(seq_file_net(seq), RT_TABLE_MAIN))
-		v = *pos ? fib_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
-	return v;
-}
-
-static void *fib_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	++*pos;
-	return v == SEQ_START_TOKEN ? fib_get_first(seq) : fib_get_next(seq);
-}
-
-static void fib_seq_stop(struct seq_file *seq, void *v)
-	__releases(RCU)
-{
-	rcu_read_unlock();
-}
-
-static unsigned fib_flag_trans(int type, __be32 mask, struct fib_info *fi)
-{
-	static const unsigned type2flags[RTN_MAX + 1] = {
-		[7] = RTF_REJECT,
-		[8] = RTF_REJECT,
-	};
-	unsigned flags = type2flags[type];
-
-	if (fi && fi->fib_nh->nh_gw)
-		flags |= RTF_GATEWAY;
-	if (mask == htonl(0xFFFFFFFF))
-		flags |= RTF_HOST;
-	flags |= RTF_UP;
-	return flags;
-}
-
-/*
- *	This outputs /proc/net/route.
- *
- *	It always works in backward compatibility mode.
- *	The format of the file is not supposed to be changed.
- */
-static int fib_seq_show(struct seq_file *seq, void *v)
-{
-	struct fib_iter_state *iter;
-	int len;
-	__be32 prefix, mask;
-	unsigned flags;
-	struct fib_node *f;
-	struct fib_alias *fa;
-	struct fib_info *fi;
-
-	if (v == SEQ_START_TOKEN) {
-		seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway "
-			   "\tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU"
-			   "\tWindow\tIRTT");
-		goto out;
-	}
-
-	iter	= seq->private;
-	f	= iter->fn;
-	fa	= iter->fa;
-	fi	= fa->fa_info;
-	prefix	= f->fn_key;
-	mask	= FZ_MASK(iter->zone);
-	flags	= fib_flag_trans(fa->fa_type, mask, fi);
-	if (fi)
-		seq_printf(seq,
-			 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
-			 fi->fib_dev ? fi->fib_dev->name : "*", prefix,
-			 fi->fib_nh->nh_gw, flags, 0, 0, fi->fib_priority,
-			 mask, (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
-			 fi->fib_window,
-			 fi->fib_rtt >> 3, &len);
-	else
-		seq_printf(seq,
-			 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
-			 prefix, 0, flags, 0, 0, 0, mask, 0, 0, 0, &len);
-
-	seq_printf(seq, "%*s\n", 127 - len, "");
-out:
-	return 0;
-}
-
-static const struct seq_operations fib_seq_ops = {
-	.start  = fib_seq_start,
-	.next   = fib_seq_next,
-	.stop   = fib_seq_stop,
-	.show   = fib_seq_show,
-};
-
-static int fib_seq_open(struct inode *inode, struct file *file)
-{
-	return seq_open_net(inode, file, &fib_seq_ops,
-			    sizeof(struct fib_iter_state));
-}
-
-static const struct file_operations fib_seq_fops = {
-	.owner		= THIS_MODULE,
-	.open           = fib_seq_open,
-	.read           = seq_read,
-	.llseek         = seq_lseek,
-	.release	= seq_release_net,
-};
-
-int __net_init fib_proc_init(struct net *net)
-{
-	if (!proc_net_fops_create(net, "route", S_IRUGO, &fib_seq_fops))
-		return -ENOMEM;
-	return 0;
-}
-
-void __net_exit fib_proc_exit(struct net *net)
-{
-	proc_net_remove(net, "route");
-}
-#endif /* CONFIG_PROC_FS */
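
For readers comparing the deleted implementation with the trie that replaces it:
fib_hash.c kept one small hash table per prefix length ("zone"), and fn_hash()
reduced a destination to a bucket by shifting away the host bits and folding the
rest; lookup then walked the zones from most to least specific prefix and stopped
at the first semantic match. A rough standalone rendering of the hashing step
(illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Bucket index for one zone of the removed scheme: keep the top "order"
 * prefix bits of the destination (host byte order here), fold them, and
 * mask to the zone's table size minus one (a power of two). */
static uint32_t zone_hash(uint32_t dst, unsigned int order, uint32_t hashmask)
{
	uint32_t h = order ? dst >> (32 - order) : 0;

	h ^= h >> 20;
	h ^= h >> 10;
	h ^= h >> 5;
	return h & hashmask;
}

int main(void)
{
	/* a /24 zone with a 16-slot table (hashmask 15), for illustration */
	printf("%u\n", zone_hash(0xC0000200u, 24, 15));
	return 0;
}

The trie replaces that per-zone walk with a single longest-prefix-match descent,
which is why fib_frontend.c above now creates every table with fib_trie_table().
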
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index c079cc0..d5c40d8 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -25,7 +25,7 @@
 }
 
 /* Exported by fib_semantics.c */
-extern int fib_semantic_match(struct list_head *head,
+extern int fib_semantic_match(struct fib_table *tb, struct list_head *head,
 			      const struct flowi *flp,
 			      struct fib_result *res, int prefixlen, int fib_flags);
 extern void fib_release_info(struct fib_info *);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 7981a24..3018efb 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -41,13 +41,13 @@
 	__be32			srcmask;
 	__be32			dst;
 	__be32			dstmask;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	u32			tclassid;
 #endif
 };
 
-#ifdef CONFIG_NET_CLS_ROUTE
-u32 fib_rules_tclass(struct fib_result *res)
+#ifdef CONFIG_IP_ROUTE_CLASSID
+u32 fib_rules_tclass(const struct fib_result *res)
 {
 	return res->r ? ((struct fib4_rule *) res->r)->tclassid : 0;
 }
@@ -165,7 +165,7 @@
 	if (frh->dst_len)
 		rule4->dst = nla_get_be32(tb[FRA_DST]);
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	if (tb[FRA_FLOW])
 		rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
 #endif
@@ -195,7 +195,7 @@
 	if (frh->tos && (rule4->tos != frh->tos))
 		return 0;
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	if (tb[FRA_FLOW] && (rule4->tclassid != nla_get_u32(tb[FRA_FLOW])))
 		return 0;
 #endif
@@ -224,7 +224,7 @@
 	if (rule4->src_len)
 		NLA_PUT_BE32(skb, FRA_SRC, rule4->src);
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	if (rule4->tclassid)
 		NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid);
 #endif
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 12d3dc3..562f34c 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -49,7 +49,7 @@
 static DEFINE_SPINLOCK(fib_info_lock);
 static struct hlist_head *fib_info_hash;
 static struct hlist_head *fib_info_laddrhash;
-static unsigned int fib_hash_size;
+static unsigned int fib_info_hash_size;
 static unsigned int fib_info_cnt;
 
 #define DEVINDEX_HASHBITS 8
@@ -152,6 +152,8 @@
 {
 	struct fib_info *fi = container_of(head, struct fib_info, rcu);
 
+	if (fi->fib_metrics != (u32 *) dst_default_metrics)
+		kfree(fi->fib_metrics);
 	kfree(fi);
 }
 
@@ -200,7 +202,7 @@
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 		    nh->nh_weight != onh->nh_weight ||
 #endif
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 		    nh->nh_tclassid != onh->nh_tclassid ||
 #endif
 		    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
@@ -221,7 +223,7 @@
 
 static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
 {
-	unsigned int mask = (fib_hash_size - 1);
+	unsigned int mask = (fib_info_hash_size - 1);
 	unsigned int val = fi->fib_nhs;
 
 	val ^= fi->fib_protocol;
@@ -422,7 +424,7 @@
 
 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
 			nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 			nla = nla_find(attrs, attrlen, RTA_FLOW);
 			nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
 #endif
@@ -476,7 +478,7 @@
 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
 			if (nla && nla_get_be32(nla) != nh->nh_gw)
 				return 1;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 			nla = nla_find(attrs, attrlen, RTA_FLOW);
 			if (nla && nla_get_u32(nla) != nh->nh_tclassid)
 				return 1;
@@ -613,14 +615,14 @@
 
 static inline unsigned int fib_laddr_hashfn(__be32 val)
 {
-	unsigned int mask = (fib_hash_size - 1);
+	unsigned int mask = (fib_info_hash_size - 1);
 
 	return ((__force u32)val ^
 		((__force u32)val >> 7) ^
 		((__force u32)val >> 14)) & mask;
 }
 
-static struct hlist_head *fib_hash_alloc(int bytes)
+static struct hlist_head *fib_info_hash_alloc(int bytes)
 {
 	if (bytes <= PAGE_SIZE)
 		return kzalloc(bytes, GFP_KERNEL);
@@ -630,7 +632,7 @@
 					 get_order(bytes));
 }
 
-static void fib_hash_free(struct hlist_head *hash, int bytes)
+static void fib_info_hash_free(struct hlist_head *hash, int bytes)
 {
 	if (!hash)
 		return;
@@ -641,18 +643,18 @@
 		free_pages((unsigned long) hash, get_order(bytes));
 }
 
-static void fib_hash_move(struct hlist_head *new_info_hash,
-			  struct hlist_head *new_laddrhash,
-			  unsigned int new_size)
+static void fib_info_hash_move(struct hlist_head *new_info_hash,
+			       struct hlist_head *new_laddrhash,
+			       unsigned int new_size)
 {
 	struct hlist_head *old_info_hash, *old_laddrhash;
-	unsigned int old_size = fib_hash_size;
+	unsigned int old_size = fib_info_hash_size;
 	unsigned int i, bytes;
 
 	spin_lock_bh(&fib_info_lock);
 	old_info_hash = fib_info_hash;
 	old_laddrhash = fib_info_laddrhash;
-	fib_hash_size = new_size;
+	fib_info_hash_size = new_size;
 
 	for (i = 0; i < old_size; i++) {
 		struct hlist_head *head = &fib_info_hash[i];
@@ -693,8 +695,8 @@
 	spin_unlock_bh(&fib_info_lock);
 
 	bytes = old_size * sizeof(struct hlist_head *);
-	fib_hash_free(old_info_hash, bytes);
-	fib_hash_free(old_laddrhash, bytes);
+	fib_info_hash_free(old_info_hash, bytes);
+	fib_info_hash_free(old_laddrhash, bytes);
 }
 
 struct fib_info *fib_create_info(struct fib_config *cfg)
@@ -718,8 +720,8 @@
 #endif
 
 	err = -ENOBUFS;
-	if (fib_info_cnt >= fib_hash_size) {
-		unsigned int new_size = fib_hash_size << 1;
+	if (fib_info_cnt >= fib_info_hash_size) {
+		unsigned int new_size = fib_info_hash_size << 1;
 		struct hlist_head *new_info_hash;
 		struct hlist_head *new_laddrhash;
 		unsigned int bytes;
@@ -727,21 +729,27 @@
 		if (!new_size)
 			new_size = 1;
 		bytes = new_size * sizeof(struct hlist_head *);
-		new_info_hash = fib_hash_alloc(bytes);
-		new_laddrhash = fib_hash_alloc(bytes);
+		new_info_hash = fib_info_hash_alloc(bytes);
+		new_laddrhash = fib_info_hash_alloc(bytes);
 		if (!new_info_hash || !new_laddrhash) {
-			fib_hash_free(new_info_hash, bytes);
-			fib_hash_free(new_laddrhash, bytes);
+			fib_info_hash_free(new_info_hash, bytes);
+			fib_info_hash_free(new_laddrhash, bytes);
 		} else
-			fib_hash_move(new_info_hash, new_laddrhash, new_size);
+			fib_info_hash_move(new_info_hash, new_laddrhash, new_size);
 
-		if (!fib_hash_size)
+		if (!fib_info_hash_size)
 			goto failure;
 	}
 
 	fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
 	if (fi == NULL)
 		goto failure;
+	if (cfg->fc_mx) {
+		fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
+		if (!fi->fib_metrics)
+			goto failure;
+	} else
+		fi->fib_metrics = (u32 *) dst_default_metrics;
 	fib_info_cnt++;
 
 	fi->fib_net = hold_net(net);
@@ -779,7 +787,7 @@
 			goto err_inval;
 		if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
 			goto err_inval;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 		if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
 			goto err_inval;
 #endif
@@ -792,7 +800,7 @@
 		nh->nh_oif = cfg->fc_oif;
 		nh->nh_gw = cfg->fc_gw;
 		nh->nh_flags = cfg->fc_flags;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 		nh->nh_tclassid = cfg->fc_flow;
 #endif
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -881,8 +889,9 @@
 }
 
 /* Note! fib_semantic_match intentionally uses  RCU list functions. */
-int fib_semantic_match(struct list_head *head, const struct flowi *flp,
-		       struct fib_result *res, int prefixlen, int fib_flags)
+int fib_semantic_match(struct fib_table *tb, struct list_head *head,
+		       const struct flowi *flp, struct fib_result *res,
+		       int prefixlen, int fib_flags)
 {
 	struct fib_alias *fa;
 	int nh_sel = 0;
@@ -946,6 +955,8 @@
 	res->type = fa->fa_type;
 	res->scope = fa->fa_scope;
 	res->fi = fa->fa_info;
+	res->table = tb;
+	res->fa_head = head;
 	if (!(fib_flags & FIB_LOOKUP_NOREF))
 		atomic_inc(&res->fi->fib_clntref);
 	return 0;
@@ -1002,7 +1013,7 @@
 
 		if (fi->fib_nh->nh_oif)
 			NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 		if (fi->fib_nh[0].nh_tclassid)
 			NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid);
 #endif
@@ -1027,7 +1038,7 @@
 
 			if (nh->nh_gw)
 				NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 			if (nh->nh_tclassid)
 				NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid);
 #endif
@@ -1125,6 +1136,62 @@
 	return ret;
 }
 
+/* Must be invoked inside an RCU-protected region. */
+void fib_select_default(struct fib_result *res)
+{
+	struct fib_info *fi = NULL, *last_resort = NULL;
+	struct list_head *fa_head = res->fa_head;
+	struct fib_table *tb = res->table;
+	int order = -1, last_idx = -1;
+	struct fib_alias *fa;
+
+	list_for_each_entry_rcu(fa, fa_head, fa_list) {
+		struct fib_info *next_fi = fa->fa_info;
+
+		if (fa->fa_scope != res->scope ||
+		    fa->fa_type != RTN_UNICAST)
+			continue;
+
+		if (next_fi->fib_priority > res->fi->fib_priority)
+			break;
+		if (!next_fi->fib_nh[0].nh_gw ||
+		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
+			continue;
+
+		fib_alias_accessed(fa);
+
+		if (fi == NULL) {
+			if (next_fi != res->fi)
+				break;
+		} else if (!fib_detect_death(fi, order, &last_resort,
+					     &last_idx, tb->tb_default)) {
+			fib_result_assign(res, fi);
+			tb->tb_default = order;
+			goto out;
+		}
+		fi = next_fi;
+		order++;
+	}
+
+	if (order <= 0 || fi == NULL) {
+		tb->tb_default = -1;
+		goto out;
+	}
+
+	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
+				tb->tb_default)) {
+		fib_result_assign(res, fi);
+		tb->tb_default = order;
+		goto out;
+	}
+
+	if (last_idx >= 0)
+		fib_result_assign(res, last_resort);
+	tb->tb_default = last_idx;
+out:
+	return;
+}
+
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 
 /*
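
Two things in the fib_semantics.c hunks are worth spelling out. The fib_hash_* to
fib_info_hash_* renames are namespace hygiene now that fib_hash.c is gone; the
substantive change is the metrics handling: a route configured without metrics now
points at the shared read-only dst_default_metrics array, and only routes that
actually carry metrics get a private RTAX_MAX-sized allocation, which is the only
case freed on the RCU path. A condensed sketch of that share-by-default idiom
(illustrative names, not the kernel API):

#include <stdlib.h>
#include <string.h>

#define RTAX_MAX 16				/* illustrative size */

static const unsigned int default_metrics[RTAX_MAX];	/* shared, read-only */

struct route_info {
	unsigned int *metrics;		/* default_metrics or a private copy */
};

static int route_set_metrics(struct route_info *ri, const unsigned int *cfg)
{
	if (!cfg) {
		/* nothing configured: share the default, never freed */
		ri->metrics = (unsigned int *)default_metrics;
		return 0;
	}
	ri->metrics = calloc(RTAX_MAX, sizeof(*ri->metrics));
	if (!ri->metrics)
		return -1;
	memcpy(ri->metrics, cfg, RTAX_MAX * sizeof(*cfg));
	return 0;
}

static void route_free_metrics(struct route_info *ri)
{
	if (ri->metrics != (unsigned int *)default_metrics)
		free(ri->metrics);	/* only private copies are freed */
}

int main(void)
{
	struct route_info a = { 0 }, b = { 0 };
	unsigned int cfg[RTAX_MAX] = { [2] = 1500 };	/* e.g. an MTU metric */

	route_set_metrics(&a, NULL);	/* shares default_metrics */
	route_set_metrics(&b, cfg);	/* gets a private copy */
	route_free_metrics(&a);
	route_free_metrics(&b);
	return 0;
}

fib_select_default() also moves here from fib_frontend.c and works directly from
res->fa_head and res->table, which is why fib_semantic_match() grows the extra
fib_table argument.
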
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 0f28034..edf3b09 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -95,7 +95,7 @@
 #define IS_TNODE(n) (!(n->parent & T_LEAF))
 #define IS_LEAF(n) (n->parent & T_LEAF)
 
-struct node {
+struct rt_trie_node {
 	unsigned long parent;
 	t_key key;
 };
@@ -126,7 +126,7 @@
 		struct work_struct work;
 		struct tnode *tnode_free;
 	};
-	struct node *child[0];
+	struct rt_trie_node *child[0];
 };
 
 #ifdef CONFIG_IP_FIB_TRIE_STATS
@@ -151,16 +151,16 @@
 };
 
 struct trie {
-	struct node *trie;
+	struct rt_trie_node *trie;
 #ifdef CONFIG_IP_FIB_TRIE_STATS
 	struct trie_use_stats stats;
 #endif
 };
 
-static void put_child(struct trie *t, struct tnode *tn, int i, struct node *n);
-static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
+static void put_child(struct trie *t, struct tnode *tn, int i, struct rt_trie_node *n);
+static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
 				  int wasfull);
-static struct node *resize(struct trie *t, struct tnode *tn);
+static struct rt_trie_node *resize(struct trie *t, struct tnode *tn);
 static struct tnode *inflate(struct trie *t, struct tnode *tn);
 static struct tnode *halve(struct trie *t, struct tnode *tn);
 /* tnodes to free after resize(); protected by RTNL */
@@ -177,12 +177,12 @@
 static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct kmem_cache *trie_leaf_kmem __read_mostly;
 
-static inline struct tnode *node_parent(struct node *node)
+static inline struct tnode *node_parent(struct rt_trie_node *node)
 {
 	return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
 }
 
-static inline struct tnode *node_parent_rcu(struct node *node)
+static inline struct tnode *node_parent_rcu(struct rt_trie_node *node)
 {
 	struct tnode *ret = node_parent(node);
 
@@ -192,22 +192,22 @@
 /* Same as rcu_assign_pointer
  * but that macro() assumes that value is a pointer.
  */
-static inline void node_set_parent(struct node *node, struct tnode *ptr)
+static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
 {
 	smp_wmb();
 	node->parent = (unsigned long)ptr | NODE_TYPE(node);
 }
 
-static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
+static inline struct rt_trie_node *tnode_get_child(struct tnode *tn, unsigned int i)
 {
 	BUG_ON(i >= 1U << tn->bits);
 
 	return tn->child[i];
 }
 
-static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
+static inline struct rt_trie_node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
 {
-	struct node *ret = tnode_get_child(tn, i);
+	struct rt_trie_node *ret = tnode_get_child(tn, i);
 
 	return rcu_dereference_rtnl(ret);
 }
@@ -217,12 +217,12 @@
 	return 1 << tn->bits;
 }
 
-static inline t_key mask_pfx(t_key k, unsigned short l)
+static inline t_key mask_pfx(t_key k, unsigned int l)
 {
 	return (l == 0) ? 0 : k >> (KEYLENGTH-l) << (KEYLENGTH-l);
 }
 
-static inline t_key tkey_extract_bits(t_key a, int offset, int bits)
+static inline t_key tkey_extract_bits(t_key a, unsigned int offset, unsigned int bits)
 {
 	if (offset < KEYLENGTH)
 		return ((t_key)(a << offset)) >> (KEYLENGTH - bits);
@@ -378,7 +378,7 @@
 {
 	struct tnode *tn = container_of(head, struct tnode, rcu);
 	size_t size = sizeof(struct tnode) +
-		      (sizeof(struct node *) << tn->bits);
+		      (sizeof(struct rt_trie_node *) << tn->bits);
 
 	if (size <= PAGE_SIZE)
 		kfree(tn);
@@ -402,7 +402,7 @@
 	tn->tnode_free = tnode_free_head;
 	tnode_free_head = tn;
 	tnode_free_size += sizeof(struct tnode) +
-			   (sizeof(struct node *) << tn->bits);
+			   (sizeof(struct rt_trie_node *) << tn->bits);
 }
 
 static void tnode_free_flush(void)
@@ -443,7 +443,7 @@
 
 static struct tnode *tnode_new(t_key key, int pos, int bits)
 {
-	size_t sz = sizeof(struct tnode) + (sizeof(struct node *) << bits);
+	size_t sz = sizeof(struct tnode) + (sizeof(struct rt_trie_node *) << bits);
 	struct tnode *tn = tnode_alloc(sz);
 
 	if (tn) {
@@ -456,7 +456,7 @@
 	}
 
 	pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
-		 sizeof(struct node) << bits);
+		 sizeof(struct rt_trie_node) << bits);
 	return tn;
 }
 
@@ -465,7 +465,7 @@
  * and no bits are skipped. See discussion in dyntree paper p. 6
  */
 
-static inline int tnode_full(const struct tnode *tn, const struct node *n)
+static inline int tnode_full(const struct tnode *tn, const struct rt_trie_node *n)
 {
 	if (n == NULL || IS_LEAF(n))
 		return 0;
@@ -474,7 +474,7 @@
 }
 
 static inline void put_child(struct trie *t, struct tnode *tn, int i,
-			     struct node *n)
+			     struct rt_trie_node *n)
 {
 	tnode_put_child_reorg(tn, i, n, -1);
 }
@@ -484,10 +484,10 @@
   * Update the value of full_children and empty_children.
   */
 
-static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
+static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
 				  int wasfull)
 {
-	struct node *chi = tn->child[i];
+	struct rt_trie_node *chi = tn->child[i];
 	int isfull;
 
 	BUG_ON(i >= 1<<tn->bits);
@@ -515,7 +515,7 @@
 }
 
 #define MAX_WORK 10
-static struct node *resize(struct trie *t, struct tnode *tn)
+static struct rt_trie_node *resize(struct trie *t, struct tnode *tn)
 {
 	int i;
 	struct tnode *old_tn;
@@ -605,7 +605,7 @@
 
 	/* Keep root node larger  */
 
-	if (!node_parent((struct node *)tn)) {
+	if (!node_parent((struct rt_trie_node *)tn)) {
 		inflate_threshold_use = inflate_threshold_root;
 		halve_threshold_use = halve_threshold_root;
 	} else {
@@ -635,7 +635,7 @@
 
 	/* Return if at least one inflate is run */
 	if (max_work != MAX_WORK)
-		return (struct node *) tn;
+		return (struct rt_trie_node *) tn;
 
 	/*
 	 * Halve as long as the number of empty children in this
@@ -663,7 +663,7 @@
 	if (tn->empty_children == tnode_child_length(tn) - 1) {
 one_child:
 		for (i = 0; i < tnode_child_length(tn); i++) {
-			struct node *n;
+			struct rt_trie_node *n;
 
 			n = tn->child[i];
 			if (!n)
@@ -676,7 +676,7 @@
 			return n;
 		}
 	}
-	return (struct node *) tn;
+	return (struct rt_trie_node *) tn;
 }
 
 static struct tnode *inflate(struct trie *t, struct tnode *tn)
@@ -723,14 +723,14 @@
 				goto nomem;
 			}
 
-			put_child(t, tn, 2*i, (struct node *) left);
-			put_child(t, tn, 2*i+1, (struct node *) right);
+			put_child(t, tn, 2*i, (struct rt_trie_node *) left);
+			put_child(t, tn, 2*i+1, (struct rt_trie_node *) right);
 		}
 	}
 
 	for (i = 0; i < olen; i++) {
 		struct tnode *inode;
-		struct node *node = tnode_get_child(oldtnode, i);
+		struct rt_trie_node *node = tnode_get_child(oldtnode, i);
 		struct tnode *left, *right;
 		int size, j;
 
@@ -825,7 +825,7 @@
 static struct tnode *halve(struct trie *t, struct tnode *tn)
 {
 	struct tnode *oldtnode = tn;
-	struct node *left, *right;
+	struct rt_trie_node *left, *right;
 	int i;
 	int olen = tnode_child_length(tn);
 
@@ -856,7 +856,7 @@
 			if (!newn)
 				goto nomem;
 
-			put_child(t, tn, i/2, (struct node *)newn);
+			put_child(t, tn, i/2, (struct rt_trie_node *)newn);
 		}
 
 	}
@@ -958,7 +958,7 @@
 {
 	int pos;
 	struct tnode *tn;
-	struct node *n;
+	struct rt_trie_node *n;
 
 	pos = 0;
 	n = rcu_dereference_rtnl(t->trie);
@@ -993,17 +993,17 @@
 
 	key = tn->key;
 
-	while (tn != NULL && (tp = node_parent((struct node *)tn)) != NULL) {
+	while (tn != NULL && (tp = node_parent((struct rt_trie_node *)tn)) != NULL) {
 		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
 		wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
 		tn = (struct tnode *) resize(t, (struct tnode *)tn);
 
 		tnode_put_child_reorg((struct tnode *)tp, cindex,
-				      (struct node *)tn, wasfull);
+				      (struct rt_trie_node *)tn, wasfull);
 
-		tp = node_parent((struct node *) tn);
+		tp = node_parent((struct rt_trie_node *) tn);
 		if (!tp)
-			rcu_assign_pointer(t->trie, (struct node *)tn);
+			rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
 
 		tnode_free_flush();
 		if (!tp)
@@ -1015,7 +1015,7 @@
 	if (IS_TNODE(tn))
 		tn = (struct tnode *)resize(t, (struct tnode *)tn);
 
-	rcu_assign_pointer(t->trie, (struct node *)tn);
+	rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
 	tnode_free_flush();
 }
 
@@ -1025,7 +1025,7 @@
 {
 	int pos, newpos;
 	struct tnode *tp = NULL, *tn = NULL;
-	struct node *n;
+	struct rt_trie_node *n;
 	struct leaf *l;
 	int missbit;
 	struct list_head *fa_head = NULL;
@@ -1111,10 +1111,10 @@
 	if (t->trie && n == NULL) {
 		/* Case 2: n is NULL, and will just insert a new leaf */
 
-		node_set_parent((struct node *)l, tp);
+		node_set_parent((struct rt_trie_node *)l, tp);
 
 		cindex = tkey_extract_bits(key, tp->pos, tp->bits);
-		put_child(t, (struct tnode *)tp, cindex, (struct node *)l);
+		put_child(t, (struct tnode *)tp, cindex, (struct rt_trie_node *)l);
 	} else {
 		/* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
 		/*
@@ -1141,18 +1141,18 @@
 			return NULL;
 		}
 
-		node_set_parent((struct node *)tn, tp);
+		node_set_parent((struct rt_trie_node *)tn, tp);
 
 		missbit = tkey_extract_bits(key, newpos, 1);
-		put_child(t, tn, missbit, (struct node *)l);
+		put_child(t, tn, missbit, (struct rt_trie_node *)l);
 		put_child(t, tn, 1-missbit, n);
 
 		if (tp) {
 			cindex = tkey_extract_bits(key, tp->pos, tp->bits);
 			put_child(t, (struct tnode *)tp, cindex,
-				  (struct node *)tn);
+				  (struct rt_trie_node *)tn);
 		} else {
-			rcu_assign_pointer(t->trie, (struct node *)tn);
+			rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
 			tp = tn;
 		}
 	}
@@ -1340,7 +1340,7 @@
 }
 
 /* should be called with rcu_read_lock */
-static int check_leaf(struct trie *t, struct leaf *l,
+static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
 		      t_key key,  const struct flowi *flp,
 		      struct fib_result *res, int fib_flags)
 {
@@ -1356,7 +1356,7 @@
 		if (l->key != (key & ntohl(mask)))
 			continue;
 
-		err = fib_semantic_match(&li->falh, flp, res, plen, fib_flags);
+		err = fib_semantic_match(tb, &li->falh, flp, res, plen, fib_flags);
 
 #ifdef CONFIG_IP_FIB_TRIE_STATS
 		if (err <= 0)
@@ -1376,13 +1376,13 @@
 {
 	struct trie *t = (struct trie *) tb->tb_data;
 	int ret;
-	struct node *n;
+	struct rt_trie_node *n;
 	struct tnode *pn;
-	int pos, bits;
+	unsigned int pos, bits;
 	t_key key = ntohl(flp->fl4_dst);
-	int chopped_off;
+	unsigned int chopped_off;
 	t_key cindex = 0;
-	int current_prefix_length = KEYLENGTH;
+	unsigned int current_prefix_length = KEYLENGTH;
 	struct tnode *cn;
 	t_key pref_mismatch;
 
@@ -1398,7 +1398,7 @@
 
 	/* Just a leaf? */
 	if (IS_LEAF(n)) {
-		ret = check_leaf(t, (struct leaf *)n, key, flp, res, fib_flags);
+		ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
 		goto found;
 	}
 
@@ -1423,7 +1423,7 @@
 		}
 
 		if (IS_LEAF(n)) {
-			ret = check_leaf(t, (struct leaf *)n, key, flp, res, fib_flags);
+			ret = check_leaf(tb, t, (struct leaf *)n, key, flp, res, fib_flags);
 			if (ret > 0)
 				goto backtrace;
 			goto found;
@@ -1541,7 +1541,7 @@
 		if (chopped_off <= pn->bits) {
 			cindex &= ~(1 << (chopped_off-1));
 		} else {
-			struct tnode *parent = node_parent_rcu((struct node *) pn);
+			struct tnode *parent = node_parent_rcu((struct rt_trie_node *) pn);
 			if (!parent)
 				goto failed;
 
@@ -1568,7 +1568,7 @@
  */
 static void trie_leaf_remove(struct trie *t, struct leaf *l)
 {
-	struct tnode *tp = node_parent((struct node *) l);
+	struct tnode *tp = node_parent((struct rt_trie_node *) l);
 
 	pr_debug("entering trie_leaf_remove(%p)\n", l);
 
@@ -1706,7 +1706,7 @@
  * Scan for the next right leaf starting at node p->child[idx]
  * Since we have back pointer, no recursion necessary.
  */
-static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
+static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
 {
 	do {
 		t_key idx;
@@ -1732,7 +1732,7 @@
 		}
 
 		/* Node empty, walk back up to parent */
-		c = (struct node *) p;
+		c = (struct rt_trie_node *) p;
 	} while ((p = node_parent_rcu(c)) != NULL);
 
 	return NULL; /* Root of trie */
@@ -1753,7 +1753,7 @@
 
 static struct leaf *trie_nextleaf(struct leaf *l)
 {
-	struct node *c = (struct node *) l;
+	struct rt_trie_node *c = (struct rt_trie_node *) l;
 	struct tnode *p = node_parent_rcu(c);
 
 	if (!p)
@@ -1802,80 +1802,6 @@
 	kfree(tb);
 }
 
-void fib_table_select_default(struct fib_table *tb,
-			      const struct flowi *flp,
-			      struct fib_result *res)
-{
-	struct trie *t = (struct trie *) tb->tb_data;
-	int order, last_idx;
-	struct fib_info *fi = NULL;
-	struct fib_info *last_resort;
-	struct fib_alias *fa = NULL;
-	struct list_head *fa_head;
-	struct leaf *l;
-
-	last_idx = -1;
-	last_resort = NULL;
-	order = -1;
-
-	rcu_read_lock();
-
-	l = fib_find_node(t, 0);
-	if (!l)
-		goto out;
-
-	fa_head = get_fa_head(l, 0);
-	if (!fa_head)
-		goto out;
-
-	if (list_empty(fa_head))
-		goto out;
-
-	list_for_each_entry_rcu(fa, fa_head, fa_list) {
-		struct fib_info *next_fi = fa->fa_info;
-
-		if (fa->fa_scope != res->scope ||
-		    fa->fa_type != RTN_UNICAST)
-			continue;
-
-		if (next_fi->fib_priority > res->fi->fib_priority)
-			break;
-		if (!next_fi->fib_nh[0].nh_gw ||
-		    next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
-			continue;
-
-		fib_alias_accessed(fa);
-
-		if (fi == NULL) {
-			if (next_fi != res->fi)
-				break;
-		} else if (!fib_detect_death(fi, order, &last_resort,
-					     &last_idx, tb->tb_default)) {
-			fib_result_assign(res, fi);
-			tb->tb_default = order;
-			goto out;
-		}
-		fi = next_fi;
-		order++;
-	}
-	if (order <= 0 || fi == NULL) {
-		tb->tb_default = -1;
-		goto out;
-	}
-
-	if (!fib_detect_death(fi, order, &last_resort, &last_idx,
-				tb->tb_default)) {
-		fib_result_assign(res, fi);
-		tb->tb_default = order;
-		goto out;
-	}
-	if (last_idx >= 0)
-		fib_result_assign(res, last_resort);
-	tb->tb_default = last_idx;
-out:
-	rcu_read_unlock();
-}
-
 static int fn_trie_dump_fa(t_key key, int plen, struct list_head *fah,
 			   struct fib_table *tb,
 			   struct sk_buff *skb, struct netlink_callback *cb)
@@ -1990,7 +1916,7 @@
 	return skb->len;
 }
 
-void __init fib_hash_init(void)
+void __init fib_trie_init(void)
 {
 	fn_alias_kmem = kmem_cache_create("ip_fib_alias",
 					  sizeof(struct fib_alias),
@@ -2003,8 +1929,7 @@
 }
 
 
-/* Fix more generic FIB names for init later */
-struct fib_table *fib_hash_table(u32 id)
+struct fib_table *fib_trie_table(u32 id)
 {
 	struct fib_table *tb;
 	struct trie *t;
@@ -2036,7 +1961,7 @@
 	unsigned int depth;
 };
 
-static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
+static struct rt_trie_node *fib_trie_get_next(struct fib_trie_iter *iter)
 {
 	struct tnode *tn = iter->tnode;
 	unsigned int cindex = iter->index;
@@ -2050,7 +1975,7 @@
 		 iter->tnode, iter->index, iter->depth);
 rescan:
 	while (cindex < (1<<tn->bits)) {
-		struct node *n = tnode_get_child_rcu(tn, cindex);
+		struct rt_trie_node *n = tnode_get_child_rcu(tn, cindex);
 
 		if (n) {
 			if (IS_LEAF(n)) {
@@ -2069,7 +1994,7 @@
 	}
 
 	/* Current node exhausted, pop back up */
-	p = node_parent_rcu((struct node *)tn);
+	p = node_parent_rcu((struct rt_trie_node *)tn);
 	if (p) {
 		cindex = tkey_extract_bits(tn->key, p->pos, p->bits)+1;
 		tn = p;
@@ -2081,10 +2006,10 @@
 	return NULL;
 }
 
-static struct node *fib_trie_get_first(struct fib_trie_iter *iter,
+static struct rt_trie_node *fib_trie_get_first(struct fib_trie_iter *iter,
 				       struct trie *t)
 {
-	struct node *n;
+	struct rt_trie_node *n;
 
 	if (!t)
 		return NULL;
@@ -2108,7 +2033,7 @@
 
 static void trie_collect_stats(struct trie *t, struct trie_stat *s)
 {
-	struct node *n;
+	struct rt_trie_node *n;
 	struct fib_trie_iter iter;
 
 	memset(s, 0, sizeof(*s));
@@ -2181,7 +2106,7 @@
 	seq_putc(seq, '\n');
 	seq_printf(seq, "\tPointers: %u\n", pointers);
 
-	bytes += sizeof(struct node *) * pointers;
+	bytes += sizeof(struct rt_trie_node *) * pointers;
 	seq_printf(seq, "Null ptrs: %u\n", stat->nullpointers);
 	seq_printf(seq, "Total size: %u  kB\n", (bytes + 1023) / 1024);
 }
@@ -2262,7 +2187,7 @@
 	.release = single_release_net,
 };
 
-static struct node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
+static struct rt_trie_node *fib_trie_get_idx(struct seq_file *seq, loff_t pos)
 {
 	struct fib_trie_iter *iter = seq->private;
 	struct net *net = seq_file_net(seq);
@@ -2275,7 +2200,7 @@
 		struct fib_table *tb;
 
 		hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
-			struct node *n;
+			struct rt_trie_node *n;
 
 			for (n = fib_trie_get_first(iter,
 						    (struct trie *) tb->tb_data);
@@ -2304,7 +2229,7 @@
 	struct fib_table *tb = iter->tb;
 	struct hlist_node *tb_node;
 	unsigned int h;
-	struct node *n;
+	struct rt_trie_node *n;
 
 	++*pos;
 	/* next node in same table */
@@ -2390,7 +2315,7 @@
 static int fib_trie_seq_show(struct seq_file *seq, void *v)
 {
 	const struct fib_trie_iter *iter = seq->private;
-	struct node *n = v;
+	struct rt_trie_node *n = v;
 
 	if (!node_parent_rcu(n))
 		fib_table_print(seq, iter->tb);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 4aa1b7f..ad2bcf1 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -233,48 +233,11 @@
  *	Send an ICMP frame.
  */
 
-/*
- *	Check transmit rate limitation for given message.
- *	The rate information is held in the destination cache now.
- *	This function is generic and could be used for other purposes
- *	too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
- *
- *	Note that the same dst_entry fields are modified by functions in
- *	route.c too, but these work for packet destinations while xrlim_allow
- *	works for icmp destinations. This means the rate limiting information
- *	for one "ip object" is shared - and these ICMPs are twice limited:
- *	by source and by destination.
- *
- *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
- *			  SHOULD allow setting of rate limits
- *
- * 	Shared between ICMPv4 and ICMPv6.
- */
-#define XRLIM_BURST_FACTOR 6
-int xrlim_allow(struct dst_entry *dst, int timeout)
-{
-	unsigned long now, token = dst->rate_tokens;
-	int rc = 0;
-
-	now = jiffies;
-	token += now - dst->rate_last;
-	dst->rate_last = now;
-	if (token > XRLIM_BURST_FACTOR * timeout)
-		token = XRLIM_BURST_FACTOR * timeout;
-	if (token >= timeout) {
-		token -= timeout;
-		rc = 1;
-	}
-	dst->rate_tokens = token;
-	return rc;
-}
-EXPORT_SYMBOL(xrlim_allow);
-
-static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
+static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
 		int type, int code)
 {
 	struct dst_entry *dst = &rt->dst;
-	int rc = 1;
+	bool rc = true;
 
 	if (type > NR_ICMP_TYPES)
 		goto out;
@@ -288,8 +251,12 @@
 		goto out;
 
 	/* Limit if icmp type is enabled in ratemask. */
-	if ((1 << type) & net->ipv4.sysctl_icmp_ratemask)
-		rc = xrlim_allow(dst, net->ipv4.sysctl_icmp_ratelimit);
+	if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) {
+		if (!rt->peer)
+			rt_bind_peer(rt, 1);
+		rc = inet_peer_xrlim_allow(rt->peer,
+					   net->ipv4.sysctl_icmp_ratelimit);
+	}
 out:
 	return rc;
 }
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2746c1f..2ada171 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -858,7 +858,7 @@
 	    nlmsg_len(nlh) < hdrlen)
 		return -EINVAL;
 
-	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 		if (nlmsg_attrlen(nlh, hdrlen)) {
 			struct nlattr *attr;
 
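
The change above relaxes the dump test: NLM_F_DUMP is a two-bit mask
(NLM_F_ROOT | NLM_F_MATCH), so requiring equality rejected requests that set
only one of the two bits.  A minimal sketch of the difference; the flag
values are copied here from include/linux/netlink.h purely for illustration:

#include <stdio.h>

#define NLM_F_ROOT  0x100
#define NLM_F_MATCH 0x200
#define NLM_F_DUMP  (NLM_F_ROOT | NLM_F_MATCH)

int main(void)
{
	unsigned short flags = NLM_F_ROOT;	/* request sets only one of the bits */

	printf("old test ((flags & DUMP) == DUMP): %d\n",
	       (flags & NLM_F_DUMP) == NLM_F_DUMP);
	printf("new test  (flags & DUMP):          %d\n",
	       (flags & NLM_F_DUMP) != 0);
	return 0;
}
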
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index c5af909..3c8dfa1 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -505,7 +505,9 @@
 			}
 
 			rcu_read_unlock();
+			local_bh_disable();
 			inet_twsk_deschedule(tw, twdr);
+			local_bh_enable();
 			inet_twsk_put(tw);
 			goto restart_rcu;
 		}
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d9bc857..48f8d45 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -167,9 +167,9 @@
 	int i, n = (a->family == AF_INET ? 1 : 4);
 
 	for (i = 0; i < n; i++) {
-		if (a->a6[i] == b->a6[i])
+		if (a->addr.a6[i] == b->addr.a6[i])
 			continue;
-		if (a->a6[i] < b->a6[i])
+		if (a->addr.a6[i] < b->addr.a6[i])
 			return -1;
 		return 1;
 	}
@@ -475,7 +475,7 @@
 struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 {
 	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
-	struct inet_peer_base *base = family_to_base(AF_INET);
+	struct inet_peer_base *base = family_to_base(daddr->family);
 	struct inet_peer *p;
 
 	/* Look up for the address quickly, lockless.
@@ -510,8 +510,13 @@
 		p->daddr = *daddr;
 		atomic_set(&p->refcnt, 1);
 		atomic_set(&p->rid, 0);
-		atomic_set(&p->ip_id_count, secure_ip_id(daddr->a4));
+		atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
 		p->tcp_ts_stamp = 0;
+		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
+		p->rate_tokens = 0;
+		p->rate_last = 0;
+		p->pmtu_expires = 0;
+		memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
 		INIT_LIST_HEAD(&p->unused);
 
 
@@ -579,3 +584,44 @@
 	local_bh_enable();
 }
 EXPORT_SYMBOL_GPL(inet_putpeer);
+
+/*
+ *	Check transmit rate limitation for given message.
+ *	The rate information is held in the inet_peer entries now.
+ *	This function is generic and could be used for other purposes
+ *	too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
+ *
+ *	Note that the same inet_peer fields are modified by functions in
+ *	route.c too, but these work for packet destinations while xrlim_allow
+ *	works for icmp destinations. This means the rate limiting information
+ *	for one "ip object" is shared - and these ICMPs are twice limited:
+ *	by source and by destination.
+ *
+ *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
+ *			  SHOULD allow setting of rate limits
+ *
+ * 	Shared between ICMPv4 and ICMPv6.
+ */
+#define XRLIM_BURST_FACTOR 6
+bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
+{
+	unsigned long now, token;
+	bool rc = false;
+
+	if (!peer)
+		return true;
+
+	token = peer->rate_tokens;
+	now = jiffies;
+	token += now - peer->rate_last;
+	peer->rate_last = now;
+	if (token > XRLIM_BURST_FACTOR * timeout)
+		token = XRLIM_BURST_FACTOR * timeout;
+	if (token >= timeout) {
+		token -= timeout;
+		rc = true;
+	}
+	peer->rate_tokens = token;
+	return rc;
+}
+EXPORT_SYMBOL(inet_peer_xrlim_allow);
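
A note on the token bucket above: tokens accrue with elapsed jiffies, are
capped at XRLIM_BURST_FACTOR * timeout, and each transmitted message costs
timeout tokens, so a peer that has been idle may burst up to six messages and
then settles to one per timeout.  A self-contained userspace sketch of the
same arithmetic (jiffies and the ratelimit value are simulated, not the
kernel's):

#include <stdbool.h>
#include <stdio.h>

#define XRLIM_BURST_FACTOR 6

struct peer_sim {
	unsigned long rate_tokens;
	unsigned long rate_last;
};

static bool xrlim_allow_sim(struct peer_sim *p, unsigned long now,
			    unsigned long timeout)
{
	unsigned long token = p->rate_tokens + (now - p->rate_last);
	bool ok = false;

	p->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;	/* cap the burst */
	if (token >= timeout) {
		token -= timeout;			/* spend one message */
		ok = true;
	}
	p->rate_tokens = token;
	return ok;
}

int main(void)
{
	/* a peer that has been idle long enough starts with a full bucket */
	struct peer_sim p = { .rate_tokens = XRLIM_BURST_FACTOR * 100, .rate_last = 0 };
	unsigned long timeout = 100;	/* pretend the ratelimit is 100 jiffies */

	for (unsigned long now = 0; now <= 1000; now += 10)
		printf("t=%4lu  %s\n", now,
		       xrlim_allow_sim(&p, now, timeout) ? "send" : "drop");
	return 0;
}
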
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index eb68a0e..6613edf 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -775,6 +775,7 @@
 			.fl4_dst = dst,
 			.fl4_src = tiph->saddr,
 			.fl4_tos = RT_TOS(tos),
+			.proto = IPPROTO_GRE,
 			.fl_gre_key = tunnel->parms.o_key
 		};
 		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index d859bcc..d7b2b09 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -340,7 +340,7 @@
 		}
 	}
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	if (unlikely(skb_dst(skb)->tclassid)) {
 		struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
 		u32 idx = skb_dst(skb)->tclassid;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 3f3a9af..8b65a12 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -60,6 +60,7 @@
 #include <linux/notifier.h>
 #include <linux/if_arp.h>
 #include <linux/netfilter_ipv4.h>
+#include <linux/compat.h>
 #include <net/ipip.h>
 #include <net/checksum.h>
 #include <net/netlink.h>
@@ -1434,6 +1435,81 @@
 	}
 }
 
+#ifdef CONFIG_COMPAT
+struct compat_sioc_sg_req {
+	struct in_addr src;
+	struct in_addr grp;
+	compat_ulong_t pktcnt;
+	compat_ulong_t bytecnt;
+	compat_ulong_t wrong_if;
+};
+
+struct compat_sioc_vif_req {
+	vifi_t	vifi;		/* Which iface */
+	compat_ulong_t icount;
+	compat_ulong_t ocount;
+	compat_ulong_t ibytes;
+	compat_ulong_t obytes;
+};
+
+int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+{
+	struct compat_sioc_sg_req sr;
+	struct compat_sioc_vif_req vr;
+	struct vif_device *vif;
+	struct mfc_cache *c;
+	struct net *net = sock_net(sk);
+	struct mr_table *mrt;
+
+	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+	if (mrt == NULL)
+		return -ENOENT;
+
+	switch (cmd) {
+	case SIOCGETVIFCNT:
+		if (copy_from_user(&vr, arg, sizeof(vr)))
+			return -EFAULT;
+		if (vr.vifi >= mrt->maxvif)
+			return -EINVAL;
+		read_lock(&mrt_lock);
+		vif = &mrt->vif_table[vr.vifi];
+		if (VIF_EXISTS(mrt, vr.vifi)) {
+			vr.icount = vif->pkt_in;
+			vr.ocount = vif->pkt_out;
+			vr.ibytes = vif->bytes_in;
+			vr.obytes = vif->bytes_out;
+			read_unlock(&mrt_lock);
+
+			if (copy_to_user(arg, &vr, sizeof(vr)))
+				return -EFAULT;
+			return 0;
+		}
+		read_unlock(&mrt_lock);
+		return -EADDRNOTAVAIL;
+	case SIOCGETSGCNT:
+		if (copy_from_user(&sr, arg, sizeof(sr)))
+			return -EFAULT;
+
+		rcu_read_lock();
+		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
+		if (c) {
+			sr.pktcnt = c->mfc_un.res.pkt;
+			sr.bytecnt = c->mfc_un.res.bytes;
+			sr.wrong_if = c->mfc_un.res.wrong_if;
+			rcu_read_unlock();
+
+			if (copy_to_user(arg, &sr, sizeof(sr)))
+				return -EFAULT;
+			return 0;
+		}
+		rcu_read_unlock();
+		return -EADDRNOTAVAIL;
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+#endif
+
 
 static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
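
The compat_sioc_* structures above exist because the native sioc_sg_req /
sioc_vif_req use unsigned long counters, which are 4 bytes for a 32-bit task
but 8 bytes inside a 64-bit kernel, so the two sides would disagree about the
ioctl buffer layout; compat_ulong_t pins the 32-bit layout.  A hedged
userspace illustration of the size difference (struct and field names below
are stand-ins, not the UAPI definitions), as seen when compiled on a 64-bit
host:

#include <stdint.h>
#include <stdio.h>

struct in_addr_sim { uint32_t s_addr; };

struct sg_req_native {			/* layout a 64-bit kernel would assume */
	struct in_addr_sim src, grp;
	unsigned long pktcnt, bytecnt, wrong_if;
};

struct sg_req_compat {			/* layout a 32-bit task actually passes */
	struct in_addr_sim src, grp;
	uint32_t pktcnt, bytecnt, wrong_if;
};

int main(void)
{
	printf("native: %zu bytes, compat: %zu bytes\n",
	       sizeof(struct sg_req_native), sizeof(struct sg_req_compat));
	return 0;
}
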
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index babd1a2..f926a31 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -206,8 +206,9 @@
 
 config NF_NAT_SNMP_BASIC
 	tristate "Basic SNMP-ALG support"
-	depends on NF_NAT
+	depends on NF_CONNTRACK_SNMP && NF_NAT
 	depends on NETFILTER_ADVANCED
+	default NF_NAT && NF_CONNTRACK_SNMP
 	---help---
 
 	  This module implements an Application Layer Gateway (ALG) for
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index e855fff..e95054c 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -866,6 +866,7 @@
 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
 	newinfo->initial_entries = 0;
 	loc_cpu_entry = info->entries[raw_smp_processor_id()];
+	xt_compat_init_offsets(NFPROTO_ARP, info->number);
 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
 		if (ret != 0)
@@ -1333,6 +1334,7 @@
 	duprintf("translate_compat_table: size %u\n", info->size);
 	j = 0;
 	xt_compat_lock(NFPROTO_ARP);
+	xt_compat_init_offsets(NFPROTO_ARP, number);
 	/* Walk through entries, checking offsets. */
 	xt_entry_foreach(iter0, entry0, total_size) {
 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c
index b8ddcc4..a5e52a9 100644
--- a/net/ipv4/netfilter/arpt_mangle.c
+++ b/net/ipv4/netfilter/arpt_mangle.c
@@ -60,12 +60,12 @@
 
 	if (mangle->flags & ~ARPT_MANGLE_MASK ||
 	    !(mangle->flags & ARPT_MANGLE_MASK))
-		return false;
+		return -EINVAL;
 
 	if (mangle->target != NF_DROP && mangle->target != NF_ACCEPT &&
 	   mangle->target != XT_CONTINUE)
-		return false;
-	return true;
+		return -EINVAL;
+	return 0;
 }
 
 static struct xt_target arpt_mangle_reg __read_mostly = {
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 652efea..ef7d7b9 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1063,6 +1063,7 @@
 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
 	newinfo->initial_entries = 0;
 	loc_cpu_entry = info->entries[raw_smp_processor_id()];
+	xt_compat_init_offsets(AF_INET, info->number);
 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
 		if (ret != 0)
@@ -1664,6 +1665,7 @@
 	duprintf("translate_compat_table: size %u\n", info->size);
 	j = 0;
 	xt_compat_lock(AF_INET);
+	xt_compat_init_offsets(AF_INET, number);
 	/* Walk through entries, checking offsets. */
 	xt_entry_foreach(iter0, entry0, total_size) {
 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 1e26a48..403ca57 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -300,13 +300,8 @@
 	 * that the ->target() function isn't called after ->destroy() */
 
 	ct = nf_ct_get(skb, &ctinfo);
-	if (ct == NULL) {
-		pr_info("no conntrack!\n");
-			/* FIXME: need to drop invalid ones, since replies
-			 * to outgoing connections of other nodes will be
-			 * marked as INVALID */
+	if (ct == NULL)
 		return NF_DROP;
-	}
 
 	/* special case: ICMP error handling. conntrack distinguishes between
 	 * error messages (RELATED) and information requests (see below) */
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 72ffc8f..d76d6c9 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -442,8 +442,7 @@
 	}
 #endif
 
-	/* MAC logging for input path only. */
-	if (in && !out)
+	if (in != NULL)
 		dump_mac_header(m, loginfo, skb);
 
 	dump_packet(m, loginfo, skb, 0);
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 294a2a3..aef5d1f 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -60,7 +60,7 @@
 	ret = ipt_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
 			   dev_net(out)->ipv4.iptable_mangle);
 	/* Reroute for ANY change. */
-	if (ret != NF_DROP && ret != NF_STOLEN && ret != NF_QUEUE) {
+	if (ret != NF_DROP && ret != NF_STOLEN) {
 		iph = ip_hdr(skb);
 
 		if (iph->saddr != saddr ||
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
index 63f60fc..5585980 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -20,6 +20,7 @@
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <net/netfilter/nf_conntrack_acct.h>
+#include <linux/rculist_nulls.h>
 
 struct ct_iter_state {
 	struct seq_net_private p;
@@ -35,7 +36,8 @@
 	for (st->bucket = 0;
 	     st->bucket < net->ct.htable_size;
 	     st->bucket++) {
-		n = rcu_dereference(net->ct.hash[st->bucket].first);
+		n = rcu_dereference(
+			hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
 		if (!is_a_nulls(n))
 			return n;
 	}
@@ -48,13 +50,14 @@
 	struct net *net = seq_file_net(seq);
 	struct ct_iter_state *st = seq->private;
 
-	head = rcu_dereference(head->next);
+	head = rcu_dereference(hlist_nulls_next_rcu(head));
 	while (is_a_nulls(head)) {
 		if (likely(get_nulls_value(head) == st->bucket)) {
 			if (++st->bucket >= net->ct.htable_size)
 				return NULL;
 		}
-		head = rcu_dereference(net->ct.hash[st->bucket].first);
+		head = rcu_dereference(
+			hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
 	}
 	return head;
 }
@@ -217,7 +220,8 @@
 	struct hlist_node *n;
 
 	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
-		n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+		n = rcu_dereference(
+			hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
 		if (n)
 			return n;
 	}
@@ -230,11 +234,12 @@
 	struct net *net = seq_file_net(seq);
 	struct ct_expect_iter_state *st = seq->private;
 
-	head = rcu_dereference(head->next);
+	head = rcu_dereference(hlist_next_rcu(head));
 	while (head == NULL) {
 		if (++st->bucket >= nf_ct_expect_hsize)
 			return NULL;
-		head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+		head = rcu_dereference(
+			hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
 	}
 	return head;
 }
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
index 0f23b3f..703f366f 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -44,13 +44,13 @@
 
 	/* Try to get same port: if not, try to change it. */
 	for (port = ntohs(exp->saved_proto.tcp.port); port != 0; port++) {
-		int ret;
+		int res;
 
 		exp->tuple.dst.u.tcp.port = htons(port);
-		ret = nf_ct_expect_related(exp);
-		if (ret == 0)
+		res = nf_ct_expect_related(exp);
+		if (res == 0)
 			break;
-		else if (ret != -EBUSY) {
+		else if (res != -EBUSY) {
 			port = 0;
 			break;
 		}
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index c04787c..21bcf47 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -221,7 +221,14 @@
 	   manips not an issue.  */
 	if (maniptype == IP_NAT_MANIP_SRC &&
 	    !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
-		if (find_appropriate_src(net, zone, orig_tuple, tuple, range)) {
+		/* try the original tuple first */
+		if (in_range(orig_tuple, range)) {
+			if (!nf_nat_used_tuple(orig_tuple, ct)) {
+				*tuple = *orig_tuple;
+				return;
+			}
+		} else if (find_appropriate_src(net, zone, orig_tuple, tuple,
+			   range)) {
 			pr_debug("get_unique_tuple: Found current src map\n");
 			if (!nf_nat_used_tuple(tuple, ct))
 				return;
@@ -266,7 +273,6 @@
 	struct net *net = nf_ct_net(ct);
 	struct nf_conntrack_tuple curr_tuple, new_tuple;
 	struct nf_conn_nat *nat;
-	int have_to_hash = !(ct->status & IPS_NAT_DONE_MASK);
 
 	/* nat helper or nfctnetlink also setup binding */
 	nat = nfct_nat(ct);
@@ -306,8 +312,7 @@
 			ct->status |= IPS_DST_NAT;
 	}
 
-	/* Place in source hash if this is the first time. */
-	if (have_to_hash) {
+	if (maniptype == IP_NAT_MANIP_SRC) {
 		unsigned int srchash;
 
 		srchash = hash_by_src(net, nf_ct_zone(ct),
@@ -323,9 +328,9 @@
 
 	/* It's done. */
 	if (maniptype == IP_NAT_MANIP_DST)
-		set_bit(IPS_DST_NAT_DONE_BIT, &ct->status);
+		ct->status |= IPS_DST_NAT_DONE;
 	else
-		set_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
+		ct->status |= IPS_SRC_NAT_DONE;
 
 	return NF_ACCEPT;
 }
@@ -502,7 +507,10 @@
 	int ret = 0;
 
 	spin_lock_bh(&nf_nat_lock);
-	if (nf_nat_protos[proto->protonum] != &nf_nat_unknown_protocol) {
+	if (rcu_dereference_protected(
+			nf_nat_protos[proto->protonum],
+			lockdep_is_held(&nf_nat_lock)
+			) != &nf_nat_unknown_protocol) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -532,7 +540,7 @@
 	if (nat == NULL || nat->ct == NULL)
 		return;
 
-	NF_CT_ASSERT(nat->ct->status & IPS_NAT_DONE_MASK);
+	NF_CT_ASSERT(nat->ct->status & IPS_SRC_NAT_DONE);
 
 	spin_lock_bh(&nf_nat_lock);
 	hlist_del_rcu(&nat->bysource);
@@ -545,11 +553,10 @@
 	struct nf_conn_nat *old_nat = old;
 	struct nf_conn *ct = old_nat->ct;
 
-	if (!ct || !(ct->status & IPS_NAT_DONE_MASK))
+	if (!ct || !(ct->status & IPS_SRC_NAT_DONE))
 		return;
 
 	spin_lock_bh(&nf_nat_lock);
-	new_nat->ct = ct;
 	hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
 	spin_unlock_bh(&nf_nat_lock);
 }
@@ -679,8 +686,7 @@
 {
 	/* Leave them the same for the moment. */
 	net->ipv4.nat_htable_size = net->ct.htable_size;
-	net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
-						       &net->ipv4.nat_vmalloced, 0);
+	net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 0);
 	if (!net->ipv4.nat_bysource)
 		return -ENOMEM;
 	return 0;
@@ -702,8 +708,7 @@
 {
 	nf_ct_iterate_cleanup(net, &clean_nat, NULL);
 	synchronize_rcu();
-	nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
-			     net->ipv4.nat_htable_size);
+	nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_htable_size);
 }
 
 static struct pernet_operations nf_nat_net_ops = {
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index ee5f419..8812a02 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -54,6 +54,7 @@
 #include <net/netfilter/nf_conntrack_expect.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_nat_helper.h>
+#include <linux/netfilter/nf_conntrack_snmp.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
@@ -1310,9 +1311,9 @@
 {
 	int ret = 0;
 
-	ret = nf_conntrack_helper_register(&snmp_helper);
-	if (ret < 0)
-		return ret;
+	BUG_ON(nf_nat_snmp_hook != NULL);
+	rcu_assign_pointer(nf_nat_snmp_hook, help);
+
 	ret = nf_conntrack_helper_register(&snmp_trap_helper);
 	if (ret < 0) {
 		nf_conntrack_helper_unregister(&snmp_helper);
@@ -1323,7 +1324,7 @@
 
 static void __exit nf_nat_snmp_basic_fini(void)
 {
-	nf_conntrack_helper_unregister(&snmp_helper);
+	rcu_assign_pointer(nf_nat_snmp_hook, NULL);
 	nf_conntrack_helper_unregister(&snmp_trap_helper);
 }
 
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index a3d5ab7..6390ba2 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -76,6 +76,7 @@
 #include <linux/seq_file.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv4.h>
+#include <linux/compat.h>
 
 static struct raw_hashinfo raw_v4_hashinfo = {
 	.lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock),
@@ -838,6 +839,23 @@
 	}
 }
 
+#ifdef CONFIG_COMPAT
+static int compat_raw_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case SIOCOUTQ:
+	case SIOCINQ:
+		return -ENOIOCTLCMD;
+	default:
+#ifdef CONFIG_IP_MROUTE
+		return ipmr_compat_ioctl(sk, cmd, compat_ptr(arg));
+#else
+		return -ENOIOCTLCMD;
+#endif
+	}
+}
+#endif
+
 struct proto raw_prot = {
 	.name		   = "RAW",
 	.owner		   = THIS_MODULE,
@@ -860,6 +878,7 @@
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_raw_setsockopt,
 	.compat_getsockopt = compat_raw_getsockopt,
+	.compat_ioctl	   = compat_raw_ioctl,
 #endif
 };
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 351dc4e..52b077d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -131,9 +131,6 @@
 static int ip_rt_min_advmss __read_mostly	= 256;
 static int rt_chain_length_max __read_mostly	= 20;
 
-static struct delayed_work expires_work;
-static unsigned long expires_ljiffies;
-
 /*
  *	Interface to generic destination cache.
  */
@@ -152,6 +149,41 @@
 {
 }
 
+static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
+{
+	struct rtable *rt = (struct rtable *) dst;
+	struct inet_peer *peer;
+	u32 *p = NULL;
+
+	if (!rt->peer)
+		rt_bind_peer(rt, 1);
+
+	peer = rt->peer;
+	if (peer) {
+		u32 *old_p = __DST_METRICS_PTR(old);
+		unsigned long prev, new;
+
+		p = peer->metrics;
+		if (inet_metrics_new(peer))
+			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+		new = (unsigned long) p;
+		prev = cmpxchg(&dst->_metrics, old, new);
+
+		if (prev != old) {
+			p = __DST_METRICS_PTR(prev);
+			if (prev & DST_METRICS_READ_ONLY)
+				p = NULL;
+		} else {
+			if (rt->fi) {
+				fib_info_put(rt->fi);
+				rt->fi = NULL;
+			}
+		}
+	}
+	return p;
+}
+
 static struct dst_ops ipv4_dst_ops = {
 	.family =		AF_INET,
 	.protocol =		cpu_to_be16(ETH_P_IP),
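
ipv4_cow_metrics() above is the copy-on-write hook for route metrics: the dst
starts out pointing at a read-only metrics block, and on the first write the
current values are copied into peer->metrics and the pointer is switched with
cmpxchg so that only one writer installs the copy.  A minimal userspace
sketch of that publish step using C11 atomics; the array size, names and the
simplified lost-race handling are illustrative only:

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

#define NMETRICS 16	/* stand-in for RTAX_MAX */

static const unsigned int default_metrics[NMETRICS];	/* read-only defaults */
static unsigned int peer_metrics[NMETRICS];		/* stand-in for peer->metrics */
static _Atomic(const unsigned int *) metrics = default_metrics;

static unsigned int *cow_metrics(void)
{
	const unsigned int *old = atomic_load(&metrics);
	const unsigned int *expected = old;

	memcpy(peer_metrics, old, sizeof(peer_metrics));	/* copy current values */
	/* publish the writable copy only if nobody switched the pointer first */
	if (atomic_compare_exchange_strong(&metrics, &expected, peer_metrics))
		return peer_metrics;
	return NULL;	/* lost the race; a caller would re-read the winner's copy */
}

int main(void)
{
	unsigned int *m = cow_metrics();

	if (m)
		m[1] = 1500;	/* e.g. record a learned value */
	printf("metrics[1]=%u, defaults untouched (%u)\n",
	       atomic_load(&metrics)[1], default_metrics[1]);
	return 0;
}
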
@@ -159,6 +191,7 @@
 	.check =		ipv4_dst_check,
 	.default_advmss =	ipv4_default_advmss,
 	.default_mtu =		ipv4_default_mtu,
+	.cow_metrics =		ipv4_cow_metrics,
 	.destroy =		ipv4_dst_destroy,
 	.ifdown =		ipv4_dst_ifdown,
 	.negative_advice =	ipv4_negative_advice,
@@ -514,7 +547,7 @@
 	.release = seq_release,
 };
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 static int rt_acct_proc_show(struct seq_file *m, void *v)
 {
 	struct ip_rt_acct *dst, *src;
@@ -567,14 +600,14 @@
 	if (!pde)
 		goto err2;
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
 	if (!pde)
 		goto err3;
 #endif
 	return 0;
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 err3:
 	remove_proc_entry("rt_cache", net->proc_net_stat);
 #endif
@@ -588,7 +621,7 @@
 {
 	remove_proc_entry("rt_cache", net->proc_net_stat);
 	remove_proc_entry("rt_cache", net->proc_net);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	remove_proc_entry("rt_acct", net->proc_net);
 #endif
 }
@@ -632,7 +665,7 @@
 static inline int rt_valuable(struct rtable *rth)
 {
 	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
-		rth->dst.expires;
+		(rth->peer && rth->peer->pmtu_expires);
 }
 
 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
@@ -643,13 +676,7 @@
 	if (atomic_read(&rth->dst.__refcnt))
 		goto out;
 
-	ret = 1;
-	if (rth->dst.expires &&
-	    time_after_eq(jiffies, rth->dst.expires))
-		goto out;
-
 	age = jiffies - rth->dst.lastuse;
-	ret = 0;
 	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
 	    (age <= tmo2 && rt_valuable(rth)))
 		goto out;
@@ -793,97 +820,6 @@
 	return ONE;
 }
 
-static void rt_check_expire(void)
-{
-	static unsigned int rover;
-	unsigned int i = rover, goal;
-	struct rtable *rth;
-	struct rtable __rcu **rthp;
-	unsigned long samples = 0;
-	unsigned long sum = 0, sum2 = 0;
-	unsigned long delta;
-	u64 mult;
-
-	delta = jiffies - expires_ljiffies;
-	expires_ljiffies = jiffies;
-	mult = ((u64)delta) << rt_hash_log;
-	if (ip_rt_gc_timeout > 1)
-		do_div(mult, ip_rt_gc_timeout);
-	goal = (unsigned int)mult;
-	if (goal > rt_hash_mask)
-		goal = rt_hash_mask + 1;
-	for (; goal > 0; goal--) {
-		unsigned long tmo = ip_rt_gc_timeout;
-		unsigned long length;
-
-		i = (i + 1) & rt_hash_mask;
-		rthp = &rt_hash_table[i].chain;
-
-		if (need_resched())
-			cond_resched();
-
-		samples++;
-
-		if (rcu_dereference_raw(*rthp) == NULL)
-			continue;
-		length = 0;
-		spin_lock_bh(rt_hash_lock_addr(i));
-		while ((rth = rcu_dereference_protected(*rthp,
-					lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
-			prefetch(rth->dst.rt_next);
-			if (rt_is_expired(rth)) {
-				*rthp = rth->dst.rt_next;
-				rt_free(rth);
-				continue;
-			}
-			if (rth->dst.expires) {
-				/* Entry is expired even if it is in use */
-				if (time_before_eq(jiffies, rth->dst.expires)) {
-nofree:
-					tmo >>= 1;
-					rthp = &rth->dst.rt_next;
-					/*
-					 * We only count entries on
-					 * a chain with equal hash inputs once
-					 * so that entries for different QOS
-					 * levels, and other non-hash input
-					 * attributes don't unfairly skew
-					 * the length computation
-					 */
-					length += has_noalias(rt_hash_table[i].chain, rth);
-					continue;
-				}
-			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
-				goto nofree;
-
-			/* Cleanup aged off entries. */
-			*rthp = rth->dst.rt_next;
-			rt_free(rth);
-		}
-		spin_unlock_bh(rt_hash_lock_addr(i));
-		sum += length;
-		sum2 += length*length;
-	}
-	if (samples) {
-		unsigned long avg = sum / samples;
-		unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
-		rt_chain_length_max = max_t(unsigned long,
-					ip_rt_gc_elasticity,
-					(avg + 4*sd) >> FRACT_BITS);
-	}
-	rover = i;
-}
-
-/*
- * rt_worker_func() is run in process context.
- * we call rt_check_expire() to scan part of the hash table
- */
-static void rt_worker_func(struct work_struct *work)
-{
-	rt_check_expire();
-	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
-}
-
 /*
 * Perturbation of rt_genid by a small quantity [1..256]
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
@@ -1272,6 +1208,13 @@
 	return 0;
 }
 
+static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
+
+static u32 rt_peer_genid(void)
+{
+	return atomic_read(&__rt_peer_genid);
+}
+
 void rt_bind_peer(struct rtable *rt, int create)
 {
 	struct inet_peer *peer;
@@ -1280,6 +1223,8 @@
 
 	if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
 		inet_putpeer(peer);
+	else
+		rt->rt_peer_genid = rt_peer_genid();
 }
 
 /*
@@ -1349,13 +1294,8 @@
 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 		    __be32 saddr, struct net_device *dev)
 {
-	int i, k;
 	struct in_device *in_dev = __in_dev_get_rcu(dev);
-	struct rtable *rth;
-	struct rtable __rcu **rthp;
-	__be32  skeys[2] = { saddr, 0 };
-	int  ikeys[2] = { dev->ifindex, 0 };
-	struct netevent_redirect netevent;
+	struct inet_peer *peer;
 	struct net *net;
 
 	if (!in_dev)
@@ -1367,9 +1307,6 @@
 	    ipv4_is_zeronet(new_gw))
 		goto reject_redirect;
 
-	if (!rt_caching(net))
-		goto reject_redirect;
-
 	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
 		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
 			goto reject_redirect;
@@ -1380,91 +1317,13 @@
 			goto reject_redirect;
 	}
 
-	for (i = 0; i < 2; i++) {
-		for (k = 0; k < 2; k++) {
-			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
-						rt_genid(net));
+	peer = inet_getpeer_v4(daddr, 1);
+	if (peer) {
+		peer->redirect_learned.a4 = new_gw;
 
-			rthp = &rt_hash_table[hash].chain;
+		inet_putpeer(peer);
 
-			while ((rth = rcu_dereference(*rthp)) != NULL) {
-				struct rtable *rt;
-
-				if (rth->fl.fl4_dst != daddr ||
-				    rth->fl.fl4_src != skeys[i] ||
-				    rth->fl.oif != ikeys[k] ||
-				    rt_is_input_route(rth) ||
-				    rt_is_expired(rth) ||
-				    !net_eq(dev_net(rth->dst.dev), net)) {
-					rthp = &rth->dst.rt_next;
-					continue;
-				}
-
-				if (rth->rt_dst != daddr ||
-				    rth->rt_src != saddr ||
-				    rth->dst.error ||
-				    rth->rt_gateway != old_gw ||
-				    rth->dst.dev != dev)
-					break;
-
-				dst_hold(&rth->dst);
-
-				rt = dst_alloc(&ipv4_dst_ops);
-				if (rt == NULL) {
-					ip_rt_put(rth);
-					return;
-				}
-
-				/* Copy all the information. */
-				*rt = *rth;
-				rt->dst.__use		= 1;
-				atomic_set(&rt->dst.__refcnt, 1);
-				rt->dst.child		= NULL;
-				if (rt->dst.dev)
-					dev_hold(rt->dst.dev);
-				rt->dst.obsolete	= -1;
-				rt->dst.lastuse	= jiffies;
-				rt->dst.path		= &rt->dst;
-				rt->dst.neighbour	= NULL;
-				rt->dst.hh		= NULL;
-#ifdef CONFIG_XFRM
-				rt->dst.xfrm		= NULL;
-#endif
-				rt->rt_genid		= rt_genid(net);
-				rt->rt_flags		|= RTCF_REDIRECTED;
-
-				/* Gateway is different ... */
-				rt->rt_gateway		= new_gw;
-
-				/* Redirect received -> path was valid */
-				dst_confirm(&rth->dst);
-
-				if (rt->peer)
-					atomic_inc(&rt->peer->refcnt);
-
-				if (arp_bind_neighbour(&rt->dst) ||
-				    !(rt->dst.neighbour->nud_state &
-					    NUD_VALID)) {
-					if (rt->dst.neighbour)
-						neigh_event_send(rt->dst.neighbour, NULL);
-					ip_rt_put(rth);
-					rt_drop(rt);
-					goto do_next;
-				}
-
-				netevent.old = &rth->dst;
-				netevent.new = &rt->dst;
-				call_netevent_notifiers(NETEVENT_REDIRECT,
-							&netevent);
-
-				rt_del(hash, rth);
-				if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif))
-					ip_rt_put(rt);
-				goto do_next;
-			}
-		do_next:
-			;
-		}
+		atomic_inc(&__rt_peer_genid);
 	}
 	return;
 
@@ -1488,9 +1347,7 @@
 		if (dst->obsolete > 0) {
 			ip_rt_put(rt);
 			ret = NULL;
-		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
-			   (rt->dst.expires &&
-			    time_after_eq(jiffies, rt->dst.expires))) {
+		} else if (rt->rt_flags & RTCF_REDIRECTED) {
 			unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
 						rt->fl.oif,
 						rt_genid(dev_net(dst->dev)));
@@ -1500,6 +1357,14 @@
 #endif
 			rt_del(hash, rt);
 			ret = NULL;
+		} else if (rt->peer &&
+			   rt->peer->pmtu_expires &&
+			   time_after_eq(jiffies, rt->peer->pmtu_expires)) {
+			unsigned long orig = rt->peer->pmtu_expires;
+
+			if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
+				dst_metric_set(dst, RTAX_MTU,
+					       rt->peer->pmtu_orig);
 		}
 	}
 	return ret;
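
The cmpxchg(&rt->peer->pmtu_expires, orig, 0) idiom above (also used by
check_peer_pmtu() and the link-failure path later in this patch) lets exactly
one context clear the expiry stamp and restore the pre-PMTU-discovery MTU;
any other context sees zero and does nothing.  A small userspace sketch of
the claim pattern, with illustrative names only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct peer_sim {
	_Atomic unsigned long pmtu_expires;	/* nonzero while a learned PMTU is active */
	unsigned int pmtu_orig;			/* MTU to restore when it expires */
};

static bool expire_pmtu_once(struct peer_sim *p, unsigned int *mtu)
{
	unsigned long orig = atomic_load(&p->pmtu_expires);

	if (orig == 0)
		return false;		/* already handled elsewhere */
	if (atomic_compare_exchange_strong(&p->pmtu_expires, &orig, 0)) {
		*mtu = p->pmtu_orig;	/* we won the claim: restore the MTU */
		return true;
	}
	return false;			/* lost the claim: someone else restored it */
}

int main(void)
{
	struct peer_sim peer = { .pmtu_expires = 12345, .pmtu_orig = 1500 };
	unsigned int mtu = 1400;	/* currently clamped by a learned PMTU */

	printf("first caller restored: %d, mtu %u\n", expire_pmtu_once(&peer, &mtu), mtu);
	printf("second caller restored: %d, mtu %u\n", expire_pmtu_once(&peer, &mtu), mtu);
	return 0;
}
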
@@ -1525,6 +1390,7 @@
 {
 	struct rtable *rt = skb_rtable(skb);
 	struct in_device *in_dev;
+	struct inet_peer *peer;
 	int log_martians;
 
 	rcu_read_lock();
@@ -1536,33 +1402,41 @@
 	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
 	rcu_read_unlock();
 
+	if (!rt->peer)
+		rt_bind_peer(rt, 1);
+	peer = rt->peer;
+	if (!peer) {
+		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
+		return;
+	}
+
 	/* No redirected packets during ip_rt_redirect_silence;
 	 * reset the algorithm.
 	 */
-	if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence))
-		rt->dst.rate_tokens = 0;
+	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
+		peer->rate_tokens = 0;
 
 	/* Too many ignored redirects; do not send anything
 	 * set dst.rate_last to the last seen redirected packet.
 	 */
-	if (rt->dst.rate_tokens >= ip_rt_redirect_number) {
-		rt->dst.rate_last = jiffies;
+	if (peer->rate_tokens >= ip_rt_redirect_number) {
+		peer->rate_last = jiffies;
 		return;
 	}
 
 	/* Check for load limit; set rate_last to the latest sent
 	 * redirect.
 	 */
-	if (rt->dst.rate_tokens == 0 ||
+	if (peer->rate_tokens == 0 ||
 	    time_after(jiffies,
-		       (rt->dst.rate_last +
-			(ip_rt_redirect_load << rt->dst.rate_tokens)))) {
+		       (peer->rate_last +
+			(ip_rt_redirect_load << peer->rate_tokens)))) {
 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
-		rt->dst.rate_last = jiffies;
-		++rt->dst.rate_tokens;
+		peer->rate_last = jiffies;
+		++peer->rate_tokens;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
 		if (log_martians &&
-		    rt->dst.rate_tokens == ip_rt_redirect_number &&
+		    peer->rate_tokens == ip_rt_redirect_number &&
 		    net_ratelimit())
 			printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
 				&rt->rt_src, rt->rt_iif,
@@ -1574,7 +1448,9 @@
 static int ip_error(struct sk_buff *skb)
 {
 	struct rtable *rt = skb_rtable(skb);
+	struct inet_peer *peer;
 	unsigned long now;
+	bool send;
 	int code;
 
 	switch (rt->dst.error) {
@@ -1594,15 +1470,24 @@
 			break;
 	}
 
-	now = jiffies;
-	rt->dst.rate_tokens += now - rt->dst.rate_last;
-	if (rt->dst.rate_tokens > ip_rt_error_burst)
-		rt->dst.rate_tokens = ip_rt_error_burst;
-	rt->dst.rate_last = now;
-	if (rt->dst.rate_tokens >= ip_rt_error_cost) {
-		rt->dst.rate_tokens -= ip_rt_error_cost;
-		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
+	if (!rt->peer)
+		rt_bind_peer(rt, 1);
+	peer = rt->peer;
+
+	send = true;
+	if (peer) {
+		now = jiffies;
+		peer->rate_tokens += now - peer->rate_last;
+		if (peer->rate_tokens > ip_rt_error_burst)
+			peer->rate_tokens = ip_rt_error_burst;
+		peer->rate_last = now;
+		if (peer->rate_tokens >= ip_rt_error_cost)
+			peer->rate_tokens -= ip_rt_error_cost;
+		else
+			send = false;
 	}
+	if (send)
+		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
 
 out:	kfree_skb(skb);
 	return 0;
@@ -1630,88 +1515,130 @@
 				 unsigned short new_mtu,
 				 struct net_device *dev)
 {
-	int i, k;
 	unsigned short old_mtu = ntohs(iph->tot_len);
-	struct rtable *rth;
-	int  ikeys[2] = { dev->ifindex, 0 };
-	__be32  skeys[2] = { iph->saddr, 0, };
-	__be32  daddr = iph->daddr;
 	unsigned short est_mtu = 0;
+	struct inet_peer *peer;
 
-	for (k = 0; k < 2; k++) {
-		for (i = 0; i < 2; i++) {
-			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
-						rt_genid(net));
+	peer = inet_getpeer_v4(iph->daddr, 1);
+	if (peer) {
+		unsigned short mtu = new_mtu;
 
-			rcu_read_lock();
-			for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-			     rth = rcu_dereference(rth->dst.rt_next)) {
-				unsigned short mtu = new_mtu;
-
-				if (rth->fl.fl4_dst != daddr ||
-				    rth->fl.fl4_src != skeys[i] ||
-				    rth->rt_dst != daddr ||
-				    rth->rt_src != iph->saddr ||
-				    rth->fl.oif != ikeys[k] ||
-				    rt_is_input_route(rth) ||
-				    dst_metric_locked(&rth->dst, RTAX_MTU) ||
-				    !net_eq(dev_net(rth->dst.dev), net) ||
-				    rt_is_expired(rth))
-					continue;
-
-				if (new_mtu < 68 || new_mtu >= old_mtu) {
-
-					/* BSD 4.2 compatibility hack :-( */
-					if (mtu == 0 &&
-					    old_mtu >= dst_mtu(&rth->dst) &&
-					    old_mtu >= 68 + (iph->ihl << 2))
-						old_mtu -= iph->ihl << 2;
-
-					mtu = guess_mtu(old_mtu);
-				}
-				if (mtu <= dst_mtu(&rth->dst)) {
-					if (mtu < dst_mtu(&rth->dst)) {
-						dst_confirm(&rth->dst);
-						if (mtu < ip_rt_min_pmtu) {
-							u32 lock = dst_metric(&rth->dst,
-									      RTAX_LOCK);
-							mtu = ip_rt_min_pmtu;
-							lock |= (1 << RTAX_MTU);
-							dst_metric_set(&rth->dst, RTAX_LOCK,
-								       lock);
-						}
-						dst_metric_set(&rth->dst, RTAX_MTU, mtu);
-						dst_set_expires(&rth->dst,
-							ip_rt_mtu_expires);
-					}
-					est_mtu = mtu;
-				}
-			}
-			rcu_read_unlock();
+		if (new_mtu < 68 || new_mtu >= old_mtu) {
+			/* BSD 4.2 derived systems incorrectly adjust
+			 * tot_len by the IP header length, and report
+			 * a zero MTU in the ICMP message.
+			 */
+			if (mtu == 0 &&
+			    old_mtu >= 68 + (iph->ihl << 2))
+				old_mtu -= iph->ihl << 2;
+			mtu = guess_mtu(old_mtu);
 		}
+
+		if (mtu < ip_rt_min_pmtu)
+			mtu = ip_rt_min_pmtu;
+		if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
+			est_mtu = mtu;
+			peer->pmtu_learned = mtu;
+			peer->pmtu_expires = jiffies + ip_rt_mtu_expires;
+		}
+
+		inet_putpeer(peer);
+
+		atomic_inc(&__rt_peer_genid);
 	}
 	return est_mtu ? : new_mtu;
 }
 
+static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
+{
+	unsigned long expires = peer->pmtu_expires;
+
+	if (time_before(expires, jiffies)) {
+		u32 orig_dst_mtu = dst_mtu(dst);
+		if (peer->pmtu_learned < orig_dst_mtu) {
+			if (!peer->pmtu_orig)
+				peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
+			dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
+		}
+	} else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
+		dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
+}
+
 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
-	if (dst_mtu(dst) > mtu && mtu >= 68 &&
-	    !(dst_metric_locked(dst, RTAX_MTU))) {
-		if (mtu < ip_rt_min_pmtu) {
-			u32 lock = dst_metric(dst, RTAX_LOCK);
+	struct rtable *rt = (struct rtable *) dst;
+	struct inet_peer *peer;
+
+	dst_confirm(dst);
+
+	if (!rt->peer)
+		rt_bind_peer(rt, 1);
+	peer = rt->peer;
+	if (peer) {
+		if (mtu < ip_rt_min_pmtu)
 			mtu = ip_rt_min_pmtu;
-			dst_metric_set(dst, RTAX_LOCK, lock | (1 << RTAX_MTU));
+		if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
+			peer->pmtu_learned = mtu;
+			peer->pmtu_expires = jiffies + ip_rt_mtu_expires;
+
+			atomic_inc(&__rt_peer_genid);
+			rt->rt_peer_genid = rt_peer_genid();
+
+			check_peer_pmtu(dst, peer);
 		}
-		dst_metric_set(dst, RTAX_MTU, mtu);
-		dst_set_expires(dst, ip_rt_mtu_expires);
-		call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
+		inet_putpeer(peer);
 	}
 }
 
+static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+{
+	struct rtable *rt = (struct rtable *) dst;
+	__be32 orig_gw = rt->rt_gateway;
+
+	dst_confirm(&rt->dst);
+
+	neigh_release(rt->dst.neighbour);
+	rt->dst.neighbour = NULL;
+
+	rt->rt_gateway = peer->redirect_learned.a4;
+	if (arp_bind_neighbour(&rt->dst) ||
+	    !(rt->dst.neighbour->nud_state & NUD_VALID)) {
+		if (rt->dst.neighbour)
+			neigh_event_send(rt->dst.neighbour, NULL);
+		rt->rt_gateway = orig_gw;
+		return -EAGAIN;
+	} else {
+		rt->rt_flags |= RTCF_REDIRECTED;
+		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE,
+					rt->dst.neighbour);
+	}
+	return 0;
+}
+
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 {
-	if (rt_is_expired((struct rtable *)dst))
+	struct rtable *rt = (struct rtable *) dst;
+
+	if (rt_is_expired(rt))
 		return NULL;
+	if (rt->rt_peer_genid != rt_peer_genid()) {
+		struct inet_peer *peer;
+
+		if (!rt->peer)
+			rt_bind_peer(rt, 0);
+
+		peer = rt->peer;
+		if (peer && peer->pmtu_expires)
+			check_peer_pmtu(dst, peer);
+
+		if (peer && peer->redirect_learned.a4 &&
+		    peer->redirect_learned.a4 != rt->rt_gateway) {
+			if (check_peer_redir(dst, peer))
+				return NULL;
+		}
+
+		rt->rt_peer_genid = rt_peer_genid();
+	}
 	return dst;
 }
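
ipv4_dst_check() above revalidates cached routes lazily: learning a redirect
or a new PMTU bumps the global __rt_peer_genid, and any route whose
rt_peer_genid is stale re-reads its inet_peer before being handed out again.
A compact userspace sketch of the generation-counter idea; the types and
field names are invented for illustration:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint peer_genid;		/* bumped whenever a peer learns new data */

struct peer {
	unsigned int learned_mtu;	/* stand-in for pmtu_learned / redirect data */
};

struct cached_route {
	unsigned int mtu;		/* value cached from the peer */
	unsigned int genid;		/* generation it was last validated against */
};

/* Mirror of the rt_peer_genid check: refresh only when the counter moved. */
static bool route_check(struct cached_route *rt, const struct peer *p)
{
	unsigned int cur = atomic_load(&peer_genid);

	if (rt->genid != cur) {
		rt->mtu = p->learned_mtu;	/* pull fresh data from the peer */
		rt->genid = cur;
		return false;			/* tell the caller it was refreshed */
	}
	return true;				/* still current */
}

int main(void)
{
	struct peer p = { .learned_mtu = 1500 };
	struct cached_route rt = { .mtu = 1500, .genid = 0 };

	printf("current: %d, mtu %u\n", route_check(&rt, &p), rt.mtu);

	p.learned_mtu = 1400;			/* an ICMP FRAG_NEEDED arrives... */
	atomic_fetch_add(&peer_genid, 1);	/* ...and the generation is bumped */

	printf("current: %d, mtu %u\n", route_check(&rt, &p), rt.mtu);
	return 0;
}
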
 
@@ -1720,6 +1647,10 @@
 	struct rtable *rt = (struct rtable *) dst;
 	struct inet_peer *peer = rt->peer;
 
+	if (rt->fi) {
+		fib_info_put(rt->fi);
+		rt->fi = NULL;
+	}
 	if (peer) {
 		rt->peer = NULL;
 		inet_putpeer(peer);
@@ -1734,8 +1665,14 @@
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
 
 	rt = skb_rtable(skb);
-	if (rt)
-		dst_set_expires(&rt->dst, 0);
+	if (rt &&
+	    rt->peer &&
+	    rt->peer->pmtu_expires) {
+		unsigned long orig = rt->peer->pmtu_expires;
+
+		if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
+			dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
+	}
 }
 
 static int ip_rt_bug(struct sk_buff *skb)
@@ -1775,7 +1712,7 @@
 	memcpy(addr, &src, 4);
 }
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 static void set_class_tag(struct rtable *rt, u32 tag)
 {
 	if (!(rt->dst.tclassid & 0xFFFF))
@@ -1815,17 +1752,52 @@
 	return mtu;
 }
 
-static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
+static void rt_init_metrics(struct rtable *rt, struct fib_info *fi)
+{
+	struct inet_peer *peer;
+	int create = 0;
+
+	/* If a peer entry exists for this destination, we must hook
+	 * it up in order to get at cached metrics.
+	 */
+	if (rt->fl.flags & FLOWI_FLAG_PRECOW_METRICS)
+		create = 1;
+
+	rt_bind_peer(rt, create);
+	peer = rt->peer;
+	if (peer) {
+		if (inet_metrics_new(peer))
+			memcpy(peer->metrics, fi->fib_metrics,
+			       sizeof(u32) * RTAX_MAX);
+		dst_init_metrics(&rt->dst, peer->metrics, false);
+
+		if (peer->pmtu_expires)
+			check_peer_pmtu(&rt->dst, peer);
+		if (peer->redirect_learned.a4 &&
+		    peer->redirect_learned.a4 != rt->rt_gateway) {
+			rt->rt_gateway = peer->redirect_learned.a4;
+			rt->rt_flags |= RTCF_REDIRECTED;
+		}
+	} else {
+		if (fi->fib_metrics != (u32 *) dst_default_metrics) {
+			rt->fi = fi;
+			atomic_inc(&fi->fib_clntref);
+		}
+		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+	}
+}
+
+static void rt_set_nexthop(struct rtable *rt, const struct fib_result *res,
+			   struct fib_info *fi, u16 type, u32 itag)
 {
 	struct dst_entry *dst = &rt->dst;
-	struct fib_info *fi = res->fi;
 
 	if (fi) {
 		if (FIB_RES_GW(*res) &&
 		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
 			rt->rt_gateway = FIB_RES_GW(*res);
-		dst_import_metrics(dst, fi->fib_metrics);
-#ifdef CONFIG_NET_CLS_ROUTE
+		rt_init_metrics(rt, fi);
+#ifdef CONFIG_IP_ROUTE_CLASSID
 		dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
 #endif
 	}
@@ -1835,13 +1807,26 @@
 	if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
 		dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 #ifdef CONFIG_IP_MULTIPLE_TABLES
 	set_class_tag(rt, fib_rules_tclass(res));
 #endif
 	set_class_tag(rt, itag);
 #endif
-	rt->rt_type = res->type;
+	rt->rt_type = type;
+}
+
+static struct rtable *rt_dst_alloc(bool nopolicy, bool noxfrm)
+{
+	struct rtable *rt = dst_alloc(&ipv4_dst_ops, 1);
+	if (rt) {
+		rt->dst.obsolete = -1;
+
+		rt->dst.flags = DST_HOST |
+			(nopolicy ? DST_NOPOLICY : 0) |
+			(noxfrm ? DST_NOXFRM : 0);
+	}
+	return rt;
 }
 
 /* called in rcu_read_lock() section */
@@ -1874,24 +1859,19 @@
 		if (err < 0)
 			goto e_err;
 	}
-	rth = dst_alloc(&ipv4_dst_ops);
+	rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
 	if (!rth)
 		goto e_nobufs;
 
 	rth->dst.output = ip_rt_bug;
-	rth->dst.obsolete = -1;
 
-	atomic_set(&rth->dst.__refcnt, 1);
-	rth->dst.flags= DST_HOST;
-	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-		rth->dst.flags |= DST_NOPOLICY;
 	rth->fl.fl4_dst	= daddr;
 	rth->rt_dst	= daddr;
 	rth->fl.fl4_tos	= tos;
 	rth->fl.mark    = skb->mark;
 	rth->fl.fl4_src	= saddr;
 	rth->rt_src	= saddr;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	rth->dst.tclassid = itag;
 #endif
 	rth->rt_iif	=
@@ -1959,7 +1939,7 @@
 
 /* called in rcu_read_lock() section */
 static int __mkroute_input(struct sk_buff *skb,
-			   struct fib_result *res,
+			   const struct fib_result *res,
 			   struct in_device *in_dev,
 			   __be32 daddr, __be32 saddr, u32 tos,
 			   struct rtable **result)
@@ -2013,19 +1993,13 @@
 		}
 	}
 
-
-	rth = dst_alloc(&ipv4_dst_ops);
+	rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
+			   IN_DEV_CONF_GET(out_dev, NOXFRM));
 	if (!rth) {
 		err = -ENOBUFS;
 		goto cleanup;
 	}
 
-	atomic_set(&rth->dst.__refcnt, 1);
-	rth->dst.flags= DST_HOST;
-	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-		rth->dst.flags |= DST_NOPOLICY;
-	if (IN_DEV_CONF_GET(out_dev, NOXFRM))
-		rth->dst.flags |= DST_NOXFRM;
 	rth->fl.fl4_dst	= daddr;
 	rth->rt_dst	= daddr;
 	rth->fl.fl4_tos	= tos;
@@ -2040,12 +2014,11 @@
 	rth->fl.oif 	= 0;
 	rth->rt_spec_dst= spec_dst;
 
-	rth->dst.obsolete = -1;
 	rth->dst.input = ip_forward;
 	rth->dst.output = ip_output;
 	rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
 
-	rt_set_nexthop(rth, res, itag);
+	rt_set_nexthop(rth, res, res->fi, res->type, itag);
 
 	rth->rt_flags = flags;
 
@@ -2190,25 +2163,20 @@
 	RT_CACHE_STAT_INC(in_brd);
 
 local_input:
-	rth = dst_alloc(&ipv4_dst_ops);
+	rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
 	if (!rth)
 		goto e_nobufs;
 
 	rth->dst.output= ip_rt_bug;
-	rth->dst.obsolete = -1;
 	rth->rt_genid = rt_genid(net);
 
-	atomic_set(&rth->dst.__refcnt, 1);
-	rth->dst.flags= DST_HOST;
-	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-		rth->dst.flags |= DST_NOPOLICY;
 	rth->fl.fl4_dst	= daddr;
 	rth->rt_dst	= daddr;
 	rth->fl.fl4_tos	= tos;
 	rth->fl.mark    = skb->mark;
 	rth->fl.fl4_src	= saddr;
 	rth->rt_src	= saddr;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	rth->dst.tclassid = itag;
 #endif
 	rth->rt_iif	=
@@ -2351,38 +2319,39 @@
 EXPORT_SYMBOL(ip_route_input_common);
 
 /* called with rcu_read_lock() */
-static int __mkroute_output(struct rtable **result,
-			    struct fib_result *res,
-			    const struct flowi *fl,
-			    const struct flowi *oldflp,
-			    struct net_device *dev_out,
-			    unsigned flags)
+static struct rtable *__mkroute_output(const struct fib_result *res,
+				       const struct flowi *fl,
+				       const struct flowi *oldflp,
+				       struct net_device *dev_out,
+				       unsigned int flags)
 {
-	struct rtable *rth;
-	struct in_device *in_dev;
+	struct fib_info *fi = res->fi;
 	u32 tos = RT_FL_TOS(oldflp);
+	struct in_device *in_dev;
+	u16 type = res->type;
+	struct rtable *rth;
 
 	if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags & IFF_LOOPBACK))
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
 	if (ipv4_is_lbcast(fl->fl4_dst))
-		res->type = RTN_BROADCAST;
+		type = RTN_BROADCAST;
 	else if (ipv4_is_multicast(fl->fl4_dst))
-		res->type = RTN_MULTICAST;
+		type = RTN_MULTICAST;
 	else if (ipv4_is_zeronet(fl->fl4_dst))
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
 	if (dev_out->flags & IFF_LOOPBACK)
 		flags |= RTCF_LOCAL;
 
 	in_dev = __in_dev_get_rcu(dev_out);
 	if (!in_dev)
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
-	if (res->type == RTN_BROADCAST) {
+	if (type == RTN_BROADCAST) {
 		flags |= RTCF_BROADCAST | RTCF_LOCAL;
-		res->fi = NULL;
-	} else if (res->type == RTN_MULTICAST) {
+		fi = NULL;
+	} else if (type == RTN_MULTICAST) {
 		flags |= RTCF_MULTICAST | RTCF_LOCAL;
 		if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
 				 oldflp->proto))
@@ -2391,21 +2360,14 @@
 		 * default one, but do not gateway in this case.
 		 * Yes, it is hack.
 		 */
-		if (res->fi && res->prefixlen < 4)
-			res->fi = NULL;
+		if (fi && res->prefixlen < 4)
+			fi = NULL;
 	}
 
-
-	rth = dst_alloc(&ipv4_dst_ops);
+	rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY),
+			   IN_DEV_CONF_GET(in_dev, NOXFRM));
 	if (!rth)
-		return -ENOBUFS;
-
-	atomic_set(&rth->dst.__refcnt, 1);
-	rth->dst.flags= DST_HOST;
-	if (IN_DEV_CONF_GET(in_dev, NOXFRM))
-		rth->dst.flags |= DST_NOXFRM;
-	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-		rth->dst.flags |= DST_NOPOLICY;
+		return ERR_PTR(-ENOBUFS);
 
 	rth->fl.fl4_dst	= oldflp->fl4_dst;
 	rth->fl.fl4_tos	= tos;
@@ -2423,7 +2385,6 @@
 	rth->rt_spec_dst= fl->fl4_src;
 
 	rth->dst.output=ip_output;
-	rth->dst.obsolete = -1;
 	rth->rt_genid = rt_genid(dev_net(dev_out));
 
 	RT_CACHE_STAT_INC(out_slow_tot);
@@ -2440,7 +2401,7 @@
 			RT_CACHE_STAT_INC(out_slow_mc);
 		}
 #ifdef CONFIG_IP_MROUTE
-		if (res->type == RTN_MULTICAST) {
+		if (type == RTN_MULTICAST) {
 			if (IN_DEV_MFORWARD(in_dev) &&
 			    !ipv4_is_local_multicast(oldflp->fl4_dst)) {
 				rth->dst.input = ip_mr_input;
@@ -2450,31 +2411,10 @@
 #endif
 	}
 
-	rt_set_nexthop(rth, res, 0);
+	rt_set_nexthop(rth, res, fi, type, 0);
 
 	rth->rt_flags = flags;
-	*result = rth;
-	return 0;
-}
-
-/* called with rcu_read_lock() */
-static int ip_mkroute_output(struct rtable **rp,
-			     struct fib_result *res,
-			     const struct flowi *fl,
-			     const struct flowi *oldflp,
-			     struct net_device *dev_out,
-			     unsigned flags)
-{
-	struct rtable *rth = NULL;
-	int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
-	unsigned hash;
-	if (err == 0) {
-		hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
-			       rt_genid(dev_net(dev_out)));
-		err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
-	}
-
-	return err;
+	return rth;
 }
 
 /*
@@ -2497,6 +2437,7 @@
 	struct fib_result res;
 	unsigned int flags = 0;
 	struct net_device *dev_out = NULL;
+	struct rtable *rth;
 	int err;
 
 
@@ -2505,6 +2446,7 @@
 	res.r		= NULL;
 #endif
 
+	rcu_read_lock();
 	if (oldflp->fl4_src) {
 		err = -EINVAL;
 		if (ipv4_is_multicast(oldflp->fl4_src) ||
@@ -2645,7 +2587,7 @@
 	else
 #endif
 	if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
-		fib_select_default(net, &fl, &res);
+		fib_select_default(&res);
 
 	if (!fl.fl4_src)
 		fl.fl4_src = FIB_RES_PREFSRC(res);
@@ -2655,17 +2597,27 @@
 
 
 make_route:
-	err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
+	rth = __mkroute_output(&res, &fl, oldflp, dev_out, flags);
+	if (IS_ERR(rth))
+		err = PTR_ERR(rth);
+	else {
+		unsigned int hash;
 
-out:	return err;
+		hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
+			       rt_genid(dev_net(dev_out)));
+		err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
+	}
+
+out:
+	rcu_read_unlock();
+	return err;
 }
 
 int __ip_route_output_key(struct net *net, struct rtable **rp,
 			  const struct flowi *flp)
 {
-	unsigned int hash;
-	int res;
 	struct rtable *rth;
+	unsigned int hash;
 
 	if (!rt_caching(net))
 		goto slow_output;
@@ -2695,10 +2647,7 @@
 	rcu_read_unlock_bh();
 
 slow_output:
-	rcu_read_lock();
-	res = ip_route_output_slow(net, rp, flp);
-	rcu_read_unlock();
-	return res;
+	return ip_route_output_slow(net, rp, flp);
 }
 EXPORT_SYMBOL_GPL(__ip_route_output_key);
 
@@ -2707,6 +2656,11 @@
 	return NULL;
 }
 
+static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
+{
+	return 0;
+}
+
 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
@@ -2716,6 +2670,8 @@
 	.protocol		=	cpu_to_be16(ETH_P_IP),
 	.destroy		=	ipv4_dst_destroy,
 	.check			=	ipv4_blackhole_dst_check,
+	.default_mtu		=	ipv4_blackhole_default_mtu,
+	.default_advmss		=	ipv4_default_advmss,
 	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
 };
 
@@ -2724,12 +2680,11 @@
 {
 	struct rtable *ort = *rp;
 	struct rtable *rt = (struct rtable *)
-		dst_alloc(&ipv4_dst_blackhole_ops);
+		dst_alloc(&ipv4_dst_blackhole_ops, 1);
 
 	if (rt) {
 		struct dst_entry *new = &rt->dst;
 
-		atomic_set(&new->__refcnt, 1);
 		new->__use = 1;
 		new->input = dst_discard;
 		new->output = dst_discard;
@@ -2752,6 +2707,9 @@
 		rt->peer = ort->peer;
 		if (rt->peer)
 			atomic_inc(&rt->peer->refcnt);
+		rt->fi = ort->fi;
+		if (rt->fi)
+			atomic_inc(&rt->fi->fib_clntref);
 
 		dst_free(new);
 	}
@@ -2828,7 +2786,7 @@
 	}
 	if (rt->dst.dev)
 		NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	if (rt->dst.tclassid)
 		NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
 #endif
@@ -2847,7 +2805,8 @@
 		NLA_PUT_BE32(skb, RTA_MARK, rt->fl.mark);
 
 	error = rt->dst.error;
-	expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
+	expires = (rt->peer && rt->peer->pmtu_expires) ?
+		rt->peer->pmtu_expires - jiffies : 0;
 	if (rt->peer) {
 		inet_peer_refcheck(rt->peer);
 		id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
@@ -3249,9 +3208,9 @@
 };
 
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
-#endif /* CONFIG_NET_CLS_ROUTE */
+#endif /* CONFIG_IP_ROUTE_CLASSID */
 
 static __initdata unsigned long rhash_entries;
 static int __init set_rhash_entries(char *str)
@@ -3267,7 +3226,7 @@
 {
 	int rc = 0;
 
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
 	if (!ip_rt_acct)
 		panic("IP: failed to allocate ip_rt_acct\n");
@@ -3304,14 +3263,6 @@
 	devinet_init();
 	ip_fib_init();
 
-	/* All the timers, started at system startup tend
-	   to synchronize. Perturb it a bit.
-	 */
-	INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
-	expires_ljiffies = jiffies;
-	schedule_delayed_work(&expires_work,
-		net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
-
 	if (ip_rt_proc_init())
 		printk(KERN_ERR "Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6c11eec..a17a5a7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -873,9 +873,7 @@
 					flags);
 
 	lock_sock(sk);
-	TCP_CHECK_TIMER(sk);
 	res = do_tcp_sendpages(sk, &page, offset, size, flags);
-	TCP_CHECK_TIMER(sk);
 	release_sock(sk);
 	return res;
 }
@@ -916,7 +914,6 @@
 	long timeo;
 
 	lock_sock(sk);
-	TCP_CHECK_TIMER(sk);
 
 	flags = msg->msg_flags;
 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
@@ -1104,7 +1101,6 @@
 out:
 	if (copied)
 		tcp_push(sk, flags, mss_now, tp->nonagle);
-	TCP_CHECK_TIMER(sk);
 	release_sock(sk);
 	return copied;
 
@@ -1123,7 +1119,6 @@
 		goto out;
 out_err:
 	err = sk_stream_error(sk, flags, err);
-	TCP_CHECK_TIMER(sk);
 	release_sock(sk);
 	return err;
 }
@@ -1415,8 +1410,6 @@
 
 	lock_sock(sk);
 
-	TCP_CHECK_TIMER(sk);
-
 	err = -ENOTCONN;
 	if (sk->sk_state == TCP_LISTEN)
 		goto out;
@@ -1767,12 +1760,10 @@
 	/* Clean up data we have read: This will do ACK frames. */
 	tcp_cleanup_rbuf(sk, copied);
 
-	TCP_CHECK_TIMER(sk);
 	release_sock(sk);
 	return copied;
 
 out:
-	TCP_CHECK_TIMER(sk);
 	release_sock(sk);
 	return err;
 
@@ -2653,7 +2644,7 @@
 EXPORT_SYMBOL(compat_tcp_getsockopt);
 #endif
 
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
+struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	struct tcphdr *th;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2549b29..2f692ce 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -817,7 +817,7 @@
 	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
 
 	if (!cwnd)
-		cwnd = rfc3390_bytes_to_packets(tp->mss_cache);
+		cwnd = TCP_INIT_CWND;
 	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
 }
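
The hunk above drops the RFC 3390 calculation in favour of a fixed
TCP_INIT_CWND.  RFC 3390 sets the initial window to
min(4*MSS, max(2*MSS, 4380 bytes)); the sketch below works that out for a
few MSS values and compares it with a constant of 10 segments, which is
assumed here to be the value of TCP_INIT_CWND in kernels of this vintage:

#include <stdio.h>

#define TCP_INIT_CWND_SIM 10	/* assumed value of the kernel's TCP_INIT_CWND */

static unsigned int rfc3390_segments(unsigned int mss)
{
	unsigned int bytes = 4 * mss;

	if (bytes > 4380)
		bytes = 4380;
	if (bytes < 2 * mss)
		bytes = 2 * mss;
	return bytes / mss;	/* whole segments */
}

int main(void)
{
	unsigned int mss[] = { 536, 1460, 4096 };

	for (int i = 0; i < 3; i++)
		printf("mss %4u: rfc3390 -> %u segments, fixed -> %u segments\n",
		       mss[i], rfc3390_segments(mss[i]), TCP_INIT_CWND_SIM);
	return 0;
}
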
 
@@ -4399,7 +4399,7 @@
 			if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
 				tp->ucopy.len -= chunk;
 				tp->copied_seq += chunk;
-				eaten = (chunk == skb->len && !th->fin);
+				eaten = (chunk == skb->len);
 				tcp_rcv_space_adjust(sk);
 			}
 			local_bh_disable();
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 856f684..ef5a90b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1341,7 +1341,7 @@
 		    tcp_death_row.sysctl_tw_recycle &&
 		    (dst = inet_csk_route_req(sk, req)) != NULL &&
 		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
-		    peer->daddr.a4 == saddr) {
+		    peer->daddr.addr.a4 == saddr) {
 			inet_peer_refcheck(peer);
 			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
 			    (s32)(peer->tcp_ts - req->ts_recent) >
@@ -1556,12 +1556,10 @@
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
 		sock_rps_save_rxhash(sk, skb->rxhash);
-		TCP_CHECK_TIMER(sk);
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
 			rsk = sk;
 			goto reset;
 		}
-		TCP_CHECK_TIMER(sk);
 		return 0;
 	}
 
@@ -1583,13 +1581,10 @@
 	} else
 		sock_rps_save_rxhash(sk, skb->rxhash);
 
-
-	TCP_CHECK_TIMER(sk);
 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
 		rsk = sk;
 		goto reset;
 	}
-	TCP_CHECK_TIMER(sk);
 	return 0;
 
 reset:
@@ -1994,7 +1989,6 @@
 				}
 				req = req->dl_next;
 			}
-			st->offset = 0;
 			if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
 				break;
 get_req:
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 74a6aa0..ecd44b0 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -259,7 +259,6 @@
 		tcp_send_ack(sk);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
 	}
-	TCP_CHECK_TIMER(sk);
 
 out:
 	if (tcp_memory_pressure)
@@ -481,7 +480,6 @@
 		tcp_probe_timer(sk);
 		break;
 	}
-	TCP_CHECK_TIMER(sk);
 
 out:
 	sk_mem_reclaim(sk);
@@ -589,7 +587,6 @@
 		elapsed = keepalive_time_when(tp) - elapsed;
 	}
 
-	TCP_CHECK_TIMER(sk);
 	sk_mem_reclaim(sk);
 
 resched:
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8157b17..d37baaa 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2199,7 +2199,7 @@
 	return 0;
 }
 
-struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features)
+struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, u32 features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	unsigned int mss;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index b057d40..19fbdec 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -196,8 +196,11 @@
 {
 	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
 
+	dst_destroy_metrics_generic(dst);
+
 	if (likely(xdst->u.rt.peer))
 		inet_putpeer(xdst->u.rt.peer);
+
 	xfrm_dst_destroy(xdst);
 }
 
@@ -215,6 +218,7 @@
 	.protocol =		cpu_to_be16(ETH_P_IP),
 	.gc =			xfrm4_garbage_collect,
 	.update_pmtu =		xfrm4_update_pmtu,
+	.cow_metrics =		dst_cow_metrics_generic,
 	.destroy =		xfrm4_dst_destroy,
 	.ifdown =		xfrm4_dst_ifdown,
 	.local_out =		__ip_local_out,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 5b189c9..fd6782e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -420,9 +420,6 @@
 	    dev->type == ARPHRD_TUNNEL6 ||
 	    dev->type == ARPHRD_SIT ||
 	    dev->type == ARPHRD_NONE) {
-		printk(KERN_INFO
-		       "%s: Disabled Privacy Extensions\n",
-		       dev->name);
 		ndev->cnf.use_tempaddr = -1;
 	} else {
 		in6_dev_hold(ndev);
@@ -2664,14 +2661,12 @@
 	struct net *net = dev_net(dev);
 	struct inet6_dev *idev;
 	struct inet6_ifaddr *ifa;
-	LIST_HEAD(keep_list);
-	int state;
+	int state, i;
 
 	ASSERT_RTNL();
 
-	/* Flush routes if device is being removed or it is not loopback */
-	if (how || !(dev->flags & IFF_LOOPBACK))
-		rt6_ifdown(net, dev);
+	rt6_ifdown(net, dev);
+	neigh_ifdown(&nd_tbl, dev);
 
 	idev = __in6_dev_get(dev);
 	if (idev == NULL)
@@ -2692,6 +2687,23 @@
 
 	}
 
+	/* Step 2: clear hash table */
+	for (i = 0; i < IN6_ADDR_HSIZE; i++) {
+		struct hlist_head *h = &inet6_addr_lst[i];
+		struct hlist_node *n;
+
+		spin_lock_bh(&addrconf_hash_lock);
+	restart:
+		hlist_for_each_entry_rcu(ifa, n, h, addr_lst) {
+			if (ifa->idev == idev) {
+				hlist_del_init_rcu(&ifa->addr_lst);
+				addrconf_del_timer(ifa);
+				goto restart;
+			}
+		}
+		spin_unlock_bh(&addrconf_hash_lock);
+	}
+
 	write_lock_bh(&idev->lock);
 
 	/* Step 2: clear flags for stateless addrconf */
@@ -2725,52 +2737,23 @@
 				       struct inet6_ifaddr, if_list);
 		addrconf_del_timer(ifa);
 
-		/* If just doing link down, and address is permanent
-		   and not link-local, then retain it. */
-		if (!how &&
-		    (ifa->flags&IFA_F_PERMANENT) &&
-		    !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
-			list_move_tail(&ifa->if_list, &keep_list);
+		list_del(&ifa->if_list);
 
-			/* If not doing DAD on this address, just keep it. */
-			if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
-			    idev->cnf.accept_dad <= 0 ||
-			    (ifa->flags & IFA_F_NODAD))
-				continue;
+		write_unlock_bh(&idev->lock);
 
-			/* If it was tentative already, no need to notify */
-			if (ifa->flags & IFA_F_TENTATIVE)
-				continue;
+		spin_lock_bh(&ifa->state_lock);
+		state = ifa->state;
+		ifa->state = INET6_IFADDR_STATE_DEAD;
+		spin_unlock_bh(&ifa->state_lock);
 
-			/* Flag it for later restoration when link comes up */
-			ifa->flags |= IFA_F_TENTATIVE;
-			ifa->state = INET6_IFADDR_STATE_DAD;
-		} else {
-			list_del(&ifa->if_list);
-
-			/* clear hash table */
-			spin_lock_bh(&addrconf_hash_lock);
-			hlist_del_init_rcu(&ifa->addr_lst);
-			spin_unlock_bh(&addrconf_hash_lock);
-
-			write_unlock_bh(&idev->lock);
-			spin_lock_bh(&ifa->state_lock);
-			state = ifa->state;
-			ifa->state = INET6_IFADDR_STATE_DEAD;
-			spin_unlock_bh(&ifa->state_lock);
-
-			if (state != INET6_IFADDR_STATE_DEAD) {
-				__ipv6_ifa_notify(RTM_DELADDR, ifa);
-				atomic_notifier_call_chain(&inet6addr_chain,
-							   NETDEV_DOWN, ifa);
-			}
-
-			in6_ifa_put(ifa);
-			write_lock_bh(&idev->lock);
+		if (state != INET6_IFADDR_STATE_DEAD) {
+			__ipv6_ifa_notify(RTM_DELADDR, ifa);
+			atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
 		}
-	}
+		in6_ifa_put(ifa);
 
-	list_splice(&keep_list, &idev->addr_list);
+		write_lock_bh(&idev->lock);
+	}
 
 	write_unlock_bh(&idev->lock);
 
@@ -4159,8 +4142,7 @@
 		addrconf_leave_solict(ifp->idev, &ifp->addr);
 		dst_hold(&ifp->rt->dst);
 
-		if (ifp->state == INET6_IFADDR_STATE_DEAD &&
-		    ip6_del_rt(ifp->rt))
+		if (ip6_del_rt(ifp->rt))
 			dst_free(&ifp->rt->dst);
 		break;
 	}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 978e80e..3194aa9 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -772,7 +772,7 @@
 	return err;
 }
 
-static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
+static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, u32 features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	struct ipv6hdr *ipv6h;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 03e62f9..a31d91b 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -157,20 +157,20 @@
 /*
  * Check the ICMP output rate limit
  */
-static inline int icmpv6_xrlim_allow(struct sock *sk, u8 type,
-				     struct flowi *fl)
+static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
+				      struct flowi *fl)
 {
 	struct dst_entry *dst;
 	struct net *net = sock_net(sk);
-	int res = 0;
+	bool res = false;
 
 	/* Informational messages are not limited. */
 	if (type & ICMPV6_INFOMSG_MASK)
-		return 1;
+		return true;
 
 	/* Do not limit pmtu discovery, it would break it. */
 	if (type == ICMPV6_PKT_TOOBIG)
-		return 1;
+		return true;
 
 	/*
 	 * Look up the output route.
@@ -182,7 +182,7 @@
 		IP6_INC_STATS(net, ip6_dst_idev(dst),
 			      IPSTATS_MIB_OUTNOROUTES);
 	} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
-		res = 1;
+		res = true;
 	} else {
 		struct rt6_info *rt = (struct rt6_info *)dst;
 		int tmo = net->ipv6.sysctl.icmpv6_time;
@@ -191,7 +191,9 @@
 		if (rt->rt6i_dst.plen < 128)
 			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
 
-		res = xrlim_allow(dst, tmo);
+		if (!rt->rt6i_peer)
+			rt6_bind_peer(rt, 1);
+		res = inet_peer_xrlim_allow(rt->rt6i_peer, tmo);
 	}
 	dst_release(dst);
 	return res;
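
The ICMPv6 rate limiter above now keys its token bucket off the shared inet_peer (rt6_bind_peer() followed by inet_peer_xrlim_allow()) instead of per-dst state. As a rough, self-contained model of what that check computes (one event allowed per 'timeout' ticks, with a burst of six), here is a hedged userspace sketch; the names and the tick-based clock are illustrative only, not the kernel API:

#include <stdbool.h>
#include <stdio.h>

#define XRLIM_BURST_FACTOR 6	/* same burst factor the kernel uses */

struct peer_state {
	unsigned long rate_tokens;	/* accumulated credit, in ticks */
	unsigned long rate_last;	/* last time we were consulted */
};

/* Allow one event per 'timeout' ticks, bursting up to six events. */
static bool xrlim_allow(struct peer_state *p, unsigned long now,
			unsigned long timeout)
{
	unsigned long token = p->rate_tokens + (now - p->rate_last);
	bool ok = false;

	p->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		ok = true;
	}
	p->rate_tokens = token;
	return ok;
}

int main(void)
{
	struct peer_state p = { .rate_tokens = 0, .rate_last = 0 };
	unsigned long t;

	/* Probe every tick, allowing one ICMPv6 error per 100 ticks. */
	for (t = 0; t < 500; t++)
		if (xrlim_allow(&p, t, 100))
			printf("tick %lu: ICMPv6 error allowed\n", t);
	return 0;
}

The practical effect of the patch is that all routes sharing an inet_peer (i.e. the same destination) share one bucket, rather than each cached dst having its own.
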
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5f8d242..2600e22 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -479,10 +479,13 @@
 		else
 			target = &hdr->daddr;
 
+		if (!rt->rt6i_peer)
+			rt6_bind_peer(rt, 1);
+
 		/* Limit redirects both by destination (here)
 		   and by source (inside ndisc_send_redirect)
 		 */
-		if (xrlim_allow(dst, 1*HZ))
+		if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
 			ndisc_send_redirect(skb, n, target);
 	} else {
 		int addrtype = ipv6_addr_type(&hdr->saddr);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 9fab274..0e1d53b 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -34,6 +34,7 @@
 #include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/compat.h>
 #include <net/protocol.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
@@ -1804,6 +1805,80 @@
 	}
 }
 
+#ifdef CONFIG_COMPAT
+struct compat_sioc_sg_req6 {
+	struct sockaddr_in6 src;
+	struct sockaddr_in6 grp;
+	compat_ulong_t pktcnt;
+	compat_ulong_t bytecnt;
+	compat_ulong_t wrong_if;
+};
+
+struct compat_sioc_mif_req6 {
+	mifi_t	mifi;
+	compat_ulong_t icount;
+	compat_ulong_t ocount;
+	compat_ulong_t ibytes;
+	compat_ulong_t obytes;
+};
+
+int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
+{
+	struct compat_sioc_sg_req6 sr;
+	struct compat_sioc_mif_req6 vr;
+	struct mif_device *vif;
+	struct mfc6_cache *c;
+	struct net *net = sock_net(sk);
+	struct mr6_table *mrt;
+
+	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
+	if (mrt == NULL)
+		return -ENOENT;
+
+	switch (cmd) {
+	case SIOCGETMIFCNT_IN6:
+		if (copy_from_user(&vr, arg, sizeof(vr)))
+			return -EFAULT;
+		if (vr.mifi >= mrt->maxvif)
+			return -EINVAL;
+		read_lock(&mrt_lock);
+		vif = &mrt->vif6_table[vr.mifi];
+		if (MIF_EXISTS(mrt, vr.mifi)) {
+			vr.icount = vif->pkt_in;
+			vr.ocount = vif->pkt_out;
+			vr.ibytes = vif->bytes_in;
+			vr.obytes = vif->bytes_out;
+			read_unlock(&mrt_lock);
+
+			if (copy_to_user(arg, &vr, sizeof(vr)))
+				return -EFAULT;
+			return 0;
+		}
+		read_unlock(&mrt_lock);
+		return -EADDRNOTAVAIL;
+	case SIOCGETSGCNT_IN6:
+		if (copy_from_user(&sr, arg, sizeof(sr)))
+			return -EFAULT;
+
+		read_lock(&mrt_lock);
+		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
+		if (c) {
+			sr.pktcnt = c->mfc_un.res.pkt;
+			sr.bytecnt = c->mfc_un.res.bytes;
+			sr.wrong_if = c->mfc_un.res.wrong_if;
+			read_unlock(&mrt_lock);
+
+			if (copy_to_user(arg, &sr, sizeof(sr)))
+				return -EFAULT;
+			return 0;
+		}
+		read_unlock(&mrt_lock);
+		return -EADDRNOTAVAIL;
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+#endif
 
 static inline int ip6mr_forward2_finish(struct sk_buff *skb)
 {
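
The new ip6mr_compat_ioctl() translates the 32-bit layouts of SIOCGETMIFCNT_IN6 and SIOCGETSGCNT_IN6 so a 32-bit multicast routing daemon keeps working on a 64-bit kernel. For reference, a minimal native-layout query looks roughly like the sketch below; it is hedged: the addresses are examples, error handling is reduced to perror(), it needs CAP_NET_RAW, and it assumes a libc whose headers coexist with <linux/mroute6.h>.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/mroute6.h>	/* struct sioc_sg_req6, SIOCGETSGCNT_IN6 */

int main(void)
{
	struct sioc_sg_req6 sr;
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&sr, 0, sizeof(sr));
	sr.src.sin6_family = AF_INET6;
	sr.grp.sin6_family = AF_INET6;
	inet_pton(AF_INET6, "2001:db8::1", &sr.src.sin6_addr); /* example source */
	inet_pton(AF_INET6, "ff3e::4321",  &sr.grp.sin6_addr); /* example group  */

	if (ioctl(fd, SIOCGETSGCNT_IN6, &sr) < 0)
		perror("SIOCGETSGCNT_IN6");	/* EADDRNOTAVAIL if no such (S,G) */
	else
		printf("pkts=%lu bytes=%lu wrong_if=%lu\n",
		       sr.pktcnt, sr.bytecnt, sr.wrong_if);
	return 0;
}

On a 32-bit binary the pktcnt/bytecnt/wrong_if fields are 32 bits wide, which is exactly the layout difference the compat handler papers over.
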
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 2342545..7254ce3 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1553,7 +1553,9 @@
 			   "ICMPv6 Redirect: destination is not a neighbour.\n");
 		goto release;
 	}
-	if (!xrlim_allow(dst, 1*HZ))
+	if (!rt->rt6i_peer)
+		rt6_bind_peer(rt, 1);
+	if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
 		goto release;
 
 	if (dev->addr_len) {
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 7d227c6..47b7b8d 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1076,6 +1076,7 @@
 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
 	newinfo->initial_entries = 0;
 	loc_cpu_entry = info->entries[raw_smp_processor_id()];
+	xt_compat_init_offsets(AF_INET6, info->number);
 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
 		if (ret != 0)
@@ -1679,6 +1680,7 @@
 	duprintf("translate_compat_table: size %u\n", info->size);
 	j = 0;
 	xt_compat_lock(AF_INET6);
+	xt_compat_init_offsets(AF_INET6, number);
 	/* Walk through entries, checking offsets. */
 	xt_entry_foreach(iter0, entry0, total_size) {
 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 09c8889..e6af8d7 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -410,7 +410,7 @@
 		if (p != NULL) {
 			sb_add(m, "%02x", *p++);
 			for (i = 1; i < len; i++)
-				sb_add(m, ":%02x", p[i]);
+				sb_add(m, ":%02x", *p++);
 		}
 		sb_add(m, " ");
 
@@ -452,8 +452,7 @@
 	       in ? in->name : "",
 	       out ? out->name : "");
 
-	/* MAC logging for input path only. */
-	if (in && !out)
+	if (in != NULL)
 		dump_mac_header(m, loginfo, skb);
 
 	dump_packet(m, loginfo, skb, skb_network_offset(skb), 1);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 79d43aa..0857272 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -45,6 +45,7 @@
 #include <linux/netfilter_ipv6.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 
 
 struct nf_ct_frag6_skb_cb
@@ -73,7 +74,7 @@
 static struct netns_frags nf_init_frags;
 
 #ifdef CONFIG_SYSCTL
-struct ctl_table nf_ct_frag6_sysctl_table[] = {
+static struct ctl_table nf_ct_frag6_sysctl_table[] = {
 	{
 		.procname	= "nf_conntrack_frag6_timeout",
 		.data		= &nf_init_frags.timeout,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 86c3952..364e866 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -31,6 +31,7 @@
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
 #include <linux/skbuff.h>
+#include <linux/compat.h>
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
 
@@ -123,18 +124,18 @@
 }
 
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
-static int (*mh_filter)(struct sock *sock, struct sk_buff *skb);
+typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);
 
-int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
-					   struct sk_buff *skb))
+static mh_filter_t __rcu *mh_filter __read_mostly;
+
+int rawv6_mh_filter_register(mh_filter_t filter)
 {
 	rcu_assign_pointer(mh_filter, filter);
 	return 0;
 }
 EXPORT_SYMBOL(rawv6_mh_filter_register);
 
-int rawv6_mh_filter_unregister(int (*filter)(struct sock *sock,
-					     struct sk_buff *skb))
+int rawv6_mh_filter_unregister(mh_filter_t filter)
 {
 	rcu_assign_pointer(mh_filter, NULL);
 	synchronize_rcu();
@@ -192,10 +193,10 @@
 			 * policy is placed in rawv6_rcv() because it is
 			 * required for each socket.
 			 */
-			int (*filter)(struct sock *sock, struct sk_buff *skb);
+			mh_filter_t *filter;
 
 			filter = rcu_dereference(mh_filter);
-			filtered = filter ? filter(sk, skb) : 0;
+			filtered = filter ? (*filter)(sk, skb) : 0;
 			break;
 		}
 #endif
@@ -1157,6 +1158,23 @@
 	}
 }
 
+#ifdef CONFIG_COMPAT
+static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case SIOCOUTQ:
+	case SIOCINQ:
+		return -ENOIOCTLCMD;
+	default:
+#ifdef CONFIG_IPV6_MROUTE
+		return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg));
+#else
+		return -ENOIOCTLCMD;
+#endif
+	}
+}
+#endif
+
 static void rawv6_close(struct sock *sk, long timeout)
 {
 	if (inet_sk(sk)->inet_num == IPPROTO_RAW)
@@ -1215,6 +1233,7 @@
 #ifdef CONFIG_COMPAT
 	.compat_setsockopt = compat_rawv6_setsockopt,
 	.compat_getsockopt = compat_rawv6_getsockopt,
+	.compat_ioctl	   = compat_rawv6_ioctl,
 #endif
 };
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 373bd04..f786aed 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -72,8 +72,6 @@
 #define RT6_TRACE(x...) do { ; } while (0)
 #endif
 
-#define CLONE_OFFLINK_ROUTE 0
-
 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
 static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
@@ -99,6 +97,36 @@
 					   struct in6_addr *gwaddr, int ifindex);
 #endif
 
+static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
+{
+	struct rt6_info *rt = (struct rt6_info *) dst;
+	struct inet_peer *peer;
+	u32 *p = NULL;
+
+	if (!rt->rt6i_peer)
+		rt6_bind_peer(rt, 1);
+
+	peer = rt->rt6i_peer;
+	if (peer) {
+		u32 *old_p = __DST_METRICS_PTR(old);
+		unsigned long prev, new;
+
+		p = peer->metrics;
+		if (inet_metrics_new(peer))
+			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+
+		new = (unsigned long) p;
+		prev = cmpxchg(&dst->_metrics, old, new);
+
+		if (prev != old) {
+			p = __DST_METRICS_PTR(prev);
+			if (prev & DST_METRICS_READ_ONLY)
+				p = NULL;
+		}
+	}
+	return p;
+}
+
 static struct dst_ops ip6_dst_ops_template = {
 	.family			=	AF_INET6,
 	.protocol		=	cpu_to_be16(ETH_P_IPV6),
@@ -107,6 +135,7 @@
 	.check			=	ip6_dst_check,
 	.default_advmss		=	ip6_default_advmss,
 	.default_mtu		=	ip6_default_mtu,
+	.cow_metrics		=	ipv6_cow_metrics,
 	.destroy		=	ip6_dst_destroy,
 	.ifdown			=	ip6_dst_ifdown,
 	.negative_advice	=	ip6_negative_advice,
@@ -115,6 +144,11 @@
 	.local_out		=	__ip6_local_out,
 };
 
+static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
+{
+	return 0;
+}
+
 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
@@ -124,9 +158,15 @@
 	.protocol		=	cpu_to_be16(ETH_P_IPV6),
 	.destroy		=	ip6_dst_destroy,
 	.check			=	ip6_dst_check,
+	.default_mtu		=	ip6_blackhole_default_mtu,
+	.default_advmss		=	ip6_default_advmss,
 	.update_pmtu		=	ip6_rt_blackhole_update_pmtu,
 };
 
+static const u32 ip6_template_metrics[RTAX_MAX] = {
+	[RTAX_HOPLIMIT - 1] = 255,
+};
+
 static struct rt6_info ip6_null_entry_template = {
 	.dst = {
 		.__refcnt	= ATOMIC_INIT(1),
@@ -182,7 +222,7 @@
 /* allocate dst with ip6_dst_ops */
 static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops)
 {
-	return (struct rt6_info *)dst_alloc(ops);
+	return (struct rt6_info *)dst_alloc(ops, 0);
 }
 
 static void ip6_dst_destroy(struct dst_entry *dst)
@@ -196,22 +236,27 @@
 		in6_dev_put(idev);
 	}
 	if (peer) {
-		BUG_ON(!(rt->rt6i_flags & RTF_CACHE));
 		rt->rt6i_peer = NULL;
 		inet_putpeer(peer);
 	}
 }
 
+static atomic_t __rt6_peer_genid = ATOMIC_INIT(0);
+
+static u32 rt6_peer_genid(void)
+{
+	return atomic_read(&__rt6_peer_genid);
+}
+
 void rt6_bind_peer(struct rt6_info *rt, int create)
 {
 	struct inet_peer *peer;
 
-	if (WARN_ON(!(rt->rt6i_flags & RTF_CACHE)))
-		return;
-
 	peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
 	if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
 		inet_putpeer(peer);
+	else
+		rt->rt6i_peer_genid = rt6_peer_genid();
 }
 
 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -738,13 +783,8 @@
 
 	if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
 		nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
-	else {
-#if CLONE_OFFLINK_ROUTE
+	else
 		nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
-#else
-		goto out2;
-#endif
-	}
 
 	dst_release(&rt->dst);
 	rt = nrt ? : net->ipv6.ip6_null_entry;
@@ -834,13 +874,12 @@
 {
 	struct rt6_info *ort = (struct rt6_info *) *dstp;
 	struct rt6_info *rt = (struct rt6_info *)
-		dst_alloc(&ip6_dst_blackhole_ops);
+		dst_alloc(&ip6_dst_blackhole_ops, 1);
 	struct dst_entry *new = NULL;
 
 	if (rt) {
 		new = &rt->dst;
 
-		atomic_set(&new->__refcnt, 1);
 		new->__use = 1;
 		new->input = dst_discard;
 		new->output = dst_discard;
@@ -882,9 +921,14 @@
 
 	rt = (struct rt6_info *) dst;
 
-	if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
+	if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
+		if (rt->rt6i_peer_genid != rt6_peer_genid()) {
+			if (!rt->rt6i_peer)
+				rt6_bind_peer(rt, 0);
+			rt->rt6i_peer_genid = rt6_peer_genid();
+		}
 		return dst;
-
+	}
 	return NULL;
 }
 
@@ -935,7 +979,6 @@
 			dst_metric_set(dst, RTAX_FEATURES, features);
 		}
 		dst_metric_set(dst, RTAX_MTU, mtu);
-		call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
 	}
 }
 
@@ -2688,7 +2731,8 @@
 	net->ipv6.ip6_null_entry->dst.path =
 		(struct dst_entry *)net->ipv6.ip6_null_entry;
 	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
-	dst_metric_set(&net->ipv6.ip6_null_entry->dst, RTAX_HOPLIMIT, 255);
+	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
+			 ip6_template_metrics, true);
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
@@ -2699,7 +2743,8 @@
 	net->ipv6.ip6_prohibit_entry->dst.path =
 		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
 	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
-	dst_metric_set(&net->ipv6.ip6_prohibit_entry->dst, RTAX_HOPLIMIT, 255);
+	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
+			 ip6_template_metrics, true);
 
 	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
 					       sizeof(*net->ipv6.ip6_blk_hole_entry),
@@ -2709,7 +2754,8 @@
 	net->ipv6.ip6_blk_hole_entry->dst.path =
 		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
 	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
-	dst_metric_set(&net->ipv6.ip6_blk_hole_entry->dst, RTAX_HOPLIMIT, 255);
+	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
+			 ip6_template_metrics, true);
 #endif
 
 	net->ipv6.sysctl.flush_delay = 0;
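
ipv6_cow_metrics() above installs a writable per-peer metrics array with cmpxchg(), so concurrent writers race safely: whoever loses the race falls back to whatever pointer actually got installed. The following is a rough userspace model of that publish-once pattern using C11 atomics; it is illustrative only (not the kernel helper), and it assumes an LP64 platform where a pointer fits in unsigned long, as the kernel's dst->_metrics word does.

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

#define RTAX_MAX 16
#define METRICS_READ_ONLY 1UL	/* low bit marks a shared, read-only template */

static const unsigned long template_metrics[RTAX_MAX] = { [7] = 255 };

static _Atomic unsigned long dst_metrics;	/* pointer value plus flag bit */

static unsigned long *cow_metrics(unsigned long *private_copy)
{
	unsigned long old = atomic_load(&dst_metrics);
	const unsigned long *old_p =
		(const unsigned long *)(old & ~METRICS_READ_ONLY);

	memcpy(private_copy, old_p, sizeof(unsigned long) * RTAX_MAX);

	/* Publish the writable copy unless another writer beat us to it. */
	if (atomic_compare_exchange_strong(&dst_metrics, &old,
					   (unsigned long)private_copy))
		return private_copy;

	/* Lost the race: use whatever got installed, if it is writable. */
	return (old & METRICS_READ_ONLY) ? NULL : (unsigned long *)old;
}

int main(void)
{
	static unsigned long mine[RTAX_MAX];
	unsigned long *p;

	atomic_store(&dst_metrics,
		     (unsigned long)template_metrics | METRICS_READ_ONLY);
	p = cow_metrics(mine);
	if (p)
		p[7] = 64;	/* now safe to modify the private copy */
	printf("hoplimit is now %lu\n", p ? p[7] : template_metrics[7]);
	return 0;
}

This is also why the template routes switch from dst_metric_set() to dst_init_metrics(..., true): they start out pointing at a read-only array and are only copied on the first write.
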
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 8ce38f1..b1599a3 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -412,7 +412,7 @@
 
 	p = container_of(head, struct ip_tunnel_prl_entry, rcu_head);
 	do {
-		n = p->next;
+		n = rcu_dereference_protected(p->next, 1);
 		kfree(p);
 		p = n;
 	} while (p);
@@ -421,15 +421,17 @@
 static int
 ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
 {
-	struct ip_tunnel_prl_entry *x, **p;
+	struct ip_tunnel_prl_entry *x;
+	struct ip_tunnel_prl_entry __rcu **p;
 	int err = 0;
 
 	ASSERT_RTNL();
 
 	if (a && a->addr != htonl(INADDR_ANY)) {
-		for (p = &t->prl; *p; p = &(*p)->next) {
-			if ((*p)->addr == a->addr) {
-				x = *p;
+		for (p = &t->prl;
+		     (x = rtnl_dereference(*p)) != NULL;
+		     p = &x->next) {
+			if (x->addr == a->addr) {
 				*p = x->next;
 				call_rcu(&x->rcu_head, prl_entry_destroy_rcu);
 				t->prl_count--;
@@ -438,9 +440,9 @@
 		}
 		err = -ENXIO;
 	} else {
-		if (t->prl) {
+		x = rtnl_dereference(t->prl);
+		if (x) {
 			t->prl_count = 0;
-			x = t->prl;
 			call_rcu(&x->rcu_head, prl_list_destroy_rcu);
 			t->prl = NULL;
 		}
@@ -1179,7 +1181,7 @@
 	if (!dev->tstats)
 		return -ENOMEM;
 	dev_hold(dev);
-	sitn->tunnels_wc[0]	= tunnel;
+	rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
 	return 0;
 }
 
@@ -1196,11 +1198,12 @@
 	for (prio = 1; prio < 4; prio++) {
 		int h;
 		for (h = 0; h < HASH_SIZE; h++) {
-			struct ip_tunnel *t = sitn->tunnels[prio][h];
+			struct ip_tunnel *t;
 
+			t = rtnl_dereference(sitn->tunnels[prio][h]);
 			while (t != NULL) {
 				unregister_netdevice_queue(t->dev, head);
-				t = t->next;
+				t = rtnl_dereference(t->next);
 			}
 		}
 	}
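
The PRL deletion above now walks the list through a struct ip_tunnel_prl_entry __rcu ** so the matching node can be unlinked without tracking a separate "previous" pointer. The same pointer-to-pointer indirection, stripped of the RCU annotations, in a tiny self-contained form (names are illustrative):

#include <stdio.h>

struct prl_entry {
	unsigned int addr;
	struct prl_entry *next;
};

/* Unlink the first node with the given address; return it, or NULL. */
static struct prl_entry *prl_del(struct prl_entry **head, unsigned int addr)
{
	struct prl_entry **p, *x;

	for (p = head; (x = *p) != NULL; p = &x->next) {
		if (x->addr == addr) {
			*p = x->next;	/* works for head and middle alike */
			return x;
		}
	}
	return NULL;
}

int main(void)
{
	struct prl_entry c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct prl_entry *head = &a, *gone;

	gone = prl_del(&head, 2);
	printf("removed %u, list now: %u -> %u\n",
	       gone ? gone->addr : 0, head->addr, head->next->addr);
	return 0;
}

In the kernel version the loads go through rtnl_dereference() because the list is also traversed by RCU readers; the unlink itself is identical.
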
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index fa1d8f4..7cb65ef 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -15,6 +15,8 @@
 #include <net/addrconf.h>
 #include <net/inet_frag.h>
 
+static struct ctl_table empty[1];
+
 static ctl_table ipv6_table_template[] = {
 	{
 		.procname	= "route",
@@ -35,6 +37,12 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec
 	},
+	{
+		.procname	= "neigh",
+		.maxlen		= 0,
+		.mode		= 0555,
+		.child		= empty,
+	},
 	{ }
 };
 
@@ -152,7 +160,6 @@
 
 int ipv6_static_sysctl_register(void)
 {
-	static struct ctl_table empty[1];
 	ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty);
 	if (ip6_base == NULL)
 		return -ENOMEM;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 20aa95e..1d0ab55 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1323,7 +1323,7 @@
 		    tcp_death_row.sysctl_tw_recycle &&
 		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
 		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
-		    ipv6_addr_equal((struct in6_addr *)peer->daddr.a6,
+		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
 				    &treq->rmt_addr)) {
 			inet_peer_refcheck(peer);
 			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
@@ -1636,10 +1636,8 @@
 		opt_skb = skb_clone(skb, GFP_ATOMIC);
 
 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
-		TCP_CHECK_TIMER(sk);
 		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
 			goto reset;
-		TCP_CHECK_TIMER(sk);
 		if (opt_skb)
 			goto ipv6_pktoptions;
 		return 0;
@@ -1667,10 +1665,8 @@
 		}
 	}
 
-	TCP_CHECK_TIMER(sk);
 	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
 		goto reset;
-	TCP_CHECK_TIMER(sk);
 	if (opt_skb)
 		goto ipv6_pktoptions;
 	return 0;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9a009c6..a419a78 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1299,7 +1299,7 @@
 	return 0;
 }
 
-static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, int features)
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	unsigned int mss;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 7e74023..834dc02 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -98,6 +98,10 @@
 	if (!xdst->u.rt6.rt6i_idev)
 		return -ENODEV;
 
+	xdst->u.rt6.rt6i_peer = rt->rt6i_peer;
+	if (rt->rt6i_peer)
+		atomic_inc(&rt->rt6i_peer->refcnt);
+
 	/* Sheit... I remember I did this right. Apparently,
 	 * it was magically lost, so this code needs audit */
 	xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST |
@@ -216,6 +220,9 @@
 
 	if (likely(xdst->u.rt6.rt6i_idev))
 		in6_dev_put(xdst->u.rt6.rt6i_idev);
+	dst_destroy_metrics_generic(dst);
+	if (likely(xdst->u.rt6.rt6i_peer))
+		inet_putpeer(xdst->u.rt6.rt6i_peer);
 	xfrm_dst_destroy(xdst);
 }
 
@@ -251,6 +258,7 @@
 	.protocol =		cpu_to_be16(ETH_P_IPV6),
 	.gc =			xfrm6_garbage_collect,
 	.update_pmtu =		xfrm6_update_pmtu,
+	.cow_metrics =		dst_cow_metrics_generic,
 	.destroy =		xfrm6_dst_destroy,
 	.ifdown =		xfrm6_dst_ifdown,
 	.local_out =		__ip6_local_out,
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 4c57a9c..dbf5e40 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -20,7 +20,7 @@
 	bool
 
 config MAC80211_RC_PID
-	bool "PID controller based rate control algorithm" if EMBEDDED
+	bool "PID controller based rate control algorithm" if EXPERT
 	select MAC80211_HAS_RC
 	---help---
 	  This option enables a TX rate control algorithm for
@@ -28,14 +28,14 @@
 	  rate.
 
 config MAC80211_RC_MINSTREL
-	bool "Minstrel" if EMBEDDED
+	bool "Minstrel" if EXPERT
 	select MAC80211_HAS_RC
 	default y
 	---help---
 	  This option enables the 'minstrel' TX rate control algorithm
 
 config MAC80211_RC_MINSTREL_HT
-	bool "Minstrel 802.11n support" if EMBEDDED
+	bool "Minstrel 802.11n support" if EXPERT
 	depends on MAC80211_RC_MINSTREL
 	default y
 	---help---
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 1534f2b..82a6e0d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -85,6 +85,17 @@
 
 	  If unsure, say `N'.
 
+config NF_CONNTRACK_TIMESTAMP
+	bool  'Connection tracking timestamping'
+	depends on NETFILTER_ADVANCED
+	help
+	  This option enables support for connection tracking timestamping.
+	  This allows you to store the flow start time and to obtain
+	  the flow stop time (once the flow has been destroyed) via
+	  connection tracking events.
+
+	  If unsure, say `N'.
+
 config NF_CT_PROTO_DCCP
 	tristate 'DCCP protocol connection tracking support (EXPERIMENTAL)'
 	depends on EXPERIMENTAL
@@ -185,9 +196,13 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NF_CONNTRACK_BROADCAST
+	tristate
+
 config NF_CONNTRACK_NETBIOS_NS
 	tristate "NetBIOS name service protocol support"
 	depends on NETFILTER_ADVANCED
+	select NF_CONNTRACK_BROADCAST
 	help
 	  NetBIOS name service requests are sent as broadcast messages from an
 	  unprivileged port and responded to with unicast messages to the
@@ -204,6 +219,21 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NF_CONNTRACK_SNMP
+	tristate "SNMP service protocol support"
+	depends on NETFILTER_ADVANCED
+	select NF_CONNTRACK_BROADCAST
+	help
+	  SNMP service requests are sent as broadcast messages from an
+	  unprivileged port and responded to with unicast messages to the
+	  same port. This makes them hard to firewall properly because connection
+	  tracking doesn't deal with broadcasts. This helper tracks locally
+	  originating SNMP service requests and the corresponding
+	  responses. It relies on correct IP address configuration, specifically
+	  netmask and broadcast address.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config NF_CONNTRACK_PPTP
 	tristate "PPtP protocol support"
 	depends on NETFILTER_ADVANCED
@@ -322,10 +352,32 @@
 	ctmark), similarly to the packet mark (nfmark). Using this
 	target and match, you can set and match on this mark.
 
+config NETFILTER_XT_SET
+	tristate 'set target and match support'
+	depends on IP_SET
+	depends on NETFILTER_ADVANCED
+	help
+	  This option adds the "SET" target and "set" match.
+
+	  Using this target and match, you can add/delete and match
+	  elements in the sets created by ipset(8).
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 # alphabetically ordered list of targets
 
 comment "Xtables targets"
 
+config NETFILTER_XT_TARGET_AUDIT
+	tristate "AUDIT target support"
+	depends on AUDIT
+	depends on NETFILTER_ADVANCED
+	---help---
+	  This option adds an 'AUDIT' target, which can be used to create
+	  audit records for packets dropped/accepted.
+
+	  To compile it as a module, choose M here. If unsure, say N.
+
 config NETFILTER_XT_TARGET_CHECKSUM
 	tristate "CHECKSUM target support"
 	depends on IP_NF_MANGLE || IP6_NF_MANGLE
@@ -477,6 +529,7 @@
 config NETFILTER_XT_TARGET_NFQUEUE
 	tristate '"NFQUEUE" target Support'
 	depends on NETFILTER_ADVANCED
+	select NETFILTER_NETLINK_QUEUE
 	help
 	  This target replaced the old obsolete QUEUE target.
 
@@ -685,6 +738,15 @@
 	  If you want to compile it as a module, say M here and read
 	  <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
 
+config NETFILTER_XT_MATCH_DEVGROUP
+	tristate '"devgroup" match support'
+	depends on NETFILTER_ADVANCED
+	help
+	  This option adds a `devgroup' match, which allows you to match on
+	  the device group a network device is assigned to.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config NETFILTER_XT_MATCH_DSCP
 	tristate '"dscp" and "tos" match support'
 	depends on NETFILTER_ADVANCED
@@ -886,7 +948,7 @@
 config NETFILTER_XT_MATCH_REALM
 	tristate  '"realm" match support'
 	depends on NETFILTER_ADVANCED
-	select NET_CLS_ROUTE
+	select IP_ROUTE_CLASSID
 	help
 	  This option adds a `realm' match, which allows you to use the realm
 	  key from the routing subsystem inside iptables.
@@ -1011,4 +1073,6 @@
 
 endmenu
 
+source "net/netfilter/ipset/Kconfig"
+
 source "net/netfilter/ipvs/Kconfig"
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 441050f..d57a890 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,6 +1,7 @@
 netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
 
 nf_conntrack-y	:= nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o
+nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
 nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
 
 obj-$(CONFIG_NETFILTER) = netfilter.o
@@ -28,7 +29,9 @@
 obj-$(CONFIG_NF_CONNTRACK_FTP) += nf_conntrack_ftp.o
 obj-$(CONFIG_NF_CONNTRACK_H323) += nf_conntrack_h323.o
 obj-$(CONFIG_NF_CONNTRACK_IRC) += nf_conntrack_irc.o
+obj-$(CONFIG_NF_CONNTRACK_BROADCAST) += nf_conntrack_broadcast.o
 obj-$(CONFIG_NF_CONNTRACK_NETBIOS_NS) += nf_conntrack_netbios_ns.o
+obj-$(CONFIG_NF_CONNTRACK_SNMP) += nf_conntrack_snmp.o
 obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_conntrack_pptp.o
 obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
 obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
@@ -43,8 +46,10 @@
 # combos
 obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
 obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
+obj-$(CONFIG_NETFILTER_XT_SET) += xt_set.o
 
 # targets
+obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CHECKSUM) += xt_CHECKSUM.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
 obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
@@ -72,6 +77,7 @@
 obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_CPU) += xt_cpu.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
@@ -101,5 +107,8 @@
 obj-$(CONFIG_NETFILTER_XT_MATCH_TIME) += xt_time.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_U32) += xt_u32.o
 
+# ipset
+obj-$(CONFIG_IP_SET) += ipset/
+
 # IPVS
 obj-$(CONFIG_IP_VS) += ipvs/
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 32fcbe2..899b71c 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -133,6 +133,7 @@
 
 		/* Optimization: we don't need to hold module
 		   reference here, since function can't sleep. --RR */
+repeat:
 		verdict = elem->hook(hook, skb, indev, outdev, okfn);
 		if (verdict != NF_ACCEPT) {
 #ifdef CONFIG_NETFILTER_DEBUG
@@ -145,7 +146,7 @@
 #endif
 			if (verdict != NF_REPEAT)
 				return verdict;
-			*i = (*i)->prev;
+			goto repeat;
 		}
 	}
 	return NF_ACCEPT;
@@ -175,13 +176,21 @@
 		ret = 1;
 	} else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
 		kfree_skb(skb);
-		ret = -(verdict >> NF_VERDICT_BITS);
+		ret = NF_DROP_GETERR(verdict);
 		if (ret == 0)
 			ret = -EPERM;
 	} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
-		if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
-			      verdict >> NF_VERDICT_BITS))
-			goto next_hook;
+		ret = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
+			       verdict >> NF_VERDICT_QBITS);
+		if (ret < 0) {
+			if (ret == -ECANCELED)
+				goto next_hook;
+			if (ret == -ESRCH &&
+			   (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+				goto next_hook;
+			kfree_skb(skb);
+		}
+		ret = 0;
 	}
 	rcu_read_unlock();
 	return ret;
@@ -214,7 +223,7 @@
 /* This does not belong here, but locally generated errors need it if connection
    tracking in use: without this, connection may not be in hash table, and hence
    manufactured ICMP or RST packets will not be associated with it. */
-void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *);
+void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu __read_mostly;
 EXPORT_SYMBOL(ip_ct_attach);
 
 void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
@@ -231,7 +240,7 @@
 }
 EXPORT_SYMBOL(nf_ct_attach);
 
-void (*nf_ct_destroy)(struct nf_conntrack *);
+void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly;
 EXPORT_SYMBOL(nf_ct_destroy);
 
 void nf_conntrack_destroy(struct nf_conntrack *nfct)
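
The nf_iterate() change above restarts the same hook with "goto repeat" when it returns NF_REPEAT, instead of rewinding the list pointer, so earlier hooks in the chain are not re-run. A toy dispatcher showing that behaviour; the verdict constants and handlers here are made up purely for illustration and are not the netfilter API:

#include <stdio.h>

enum { ACCEPT, DROP, REPEAT };

static int fixup_needed = 1;	/* hook B wants one more pass after a fixup */

static int hook_a(void) { printf("hook A\n"); return ACCEPT; }
static int hook_b(void)
{
	printf("hook B\n");
	if (fixup_needed) {
		fixup_needed = 0;
		return REPEAT;	/* run me again, not the whole chain */
	}
	return ACCEPT;
}

static int (*const hooks[])(void) = { hook_a, hook_b };

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(hooks) / sizeof(hooks[0]); i++) {
repeat:
		switch (hooks[i]()) {
		case REPEAT:
			goto repeat;	/* same element; hook A stays untouched */
		case DROP:
			return 1;
		}
	}
	printf("verdict: accept\n");
	return 0;
}

With the old "*i = (*i)->prev" rewind, hook A would have run a second time before hook B was retried.
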
diff --git a/net/netfilter/ipset/Kconfig b/net/netfilter/ipset/Kconfig
new file mode 100644
index 0000000..3b970d3
--- /dev/null
+++ b/net/netfilter/ipset/Kconfig
@@ -0,0 +1,121 @@
+menuconfig IP_SET
+	tristate "IP set support"
+	depends on INET && NETFILTER
+	help
+	  This option adds IP set support to the kernel.
+	  In order to define and use the sets, you need the userspace utility
+	  ipset(8). You can use the sets in netfilter via the "set" match
+	  and "SET" target.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+if IP_SET
+
+config IP_SET_MAX
+	int "Maximum number of IP sets"
+	default 256
+	range 2 65534
+	depends on IP_SET
+	help
+	  You can define here the default value of the maximum number
+	  of IP sets for the kernel.
+
+	  The value can be overridden by the 'max_sets' module
+	  parameter of the 'ip_set' module.
+
+config IP_SET_BITMAP_IP
+	tristate "bitmap:ip set support"
+	depends on IP_SET
+	help
+	  This option adds the bitmap:ip set type support, by which one
+	  can store IPv4 addresses (or network addresses) from a range.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_BITMAP_IPMAC
+	tristate "bitmap:ip,mac set support"
+	depends on IP_SET
+	help
+	  This option adds the bitmap:ip,mac set type support, by which one
+	  can store IPv4 address and (source) MAC address pairs from a range.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_BITMAP_PORT
+	tristate "bitmap:port set support"
+	depends on IP_SET
+	help
+	  This option adds the bitmap:port set type support, by which one
+	  can store TCP/UDP port numbers from a range.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_IP
+	tristate "hash:ip set support"
+	depends on IP_SET
+	help
+	  This option adds the hash:ip set type support, by which one
+	  can store arbitrary IPv4 or IPv6 addresses (or network addresses)
+	  in a set.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_IPPORT
+	tristate "hash:ip,port set support"
+	depends on IP_SET
+	help
+	  This option adds the hash:ip,port set type support, by which one
+	  can store IPv4/IPv6 address and protocol/port pairs.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_IPPORTIP
+	tristate "hash:ip,port,ip set support"
+	depends on IP_SET
+	help
+	  This option adds the hash:ip,port,ip set type support, by which
+	  one can store IPv4/IPv6 address, protocol/port, and IPv4/IPv6
+	  address triples in a set.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_IPPORTNET
+	tristate "hash:ip,port,net set support"
+	depends on IP_SET
+	help
+	  This option adds the hash:ip,port,net set type support, by which
+	  one can store IPv4/IPv6 address, protocol/port, and IPv4/IPv6
+	  network address/prefix triples in a set.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_NET
+	tristate "hash:net set support"
+	depends on IP_SET
+	help
+	  This option adds the hash:net set type support, by which
+	  one can store IPv4/IPv6 network address/prefix elements in a set.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_HASH_NETPORT
+	tristate "hash:net,port set support"
+	depends on IP_SET
+	help
+	  This option adds the hash:net,port set type support, by which
+	  one can store IPv4/IPv6 network address/prefix and
+	  protocol/port pairs as elements in a set.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+config IP_SET_LIST_SET
+	tristate "list:set set support"
+	depends on IP_SET
+	help
+	  This option adds the list:set set type support. In this
+	  kind of set one can store the names of other sets, and it forms
+	  an ordered union of the member sets.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
+endif # IP_SET
diff --git a/net/netfilter/ipset/Makefile b/net/netfilter/ipset/Makefile
new file mode 100644
index 0000000..5adbdab
--- /dev/null
+++ b/net/netfilter/ipset/Makefile
@@ -0,0 +1,24 @@
+#
+# Makefile for the ipset modules
+#
+
+ip_set-y := ip_set_core.o ip_set_getport.o pfxlen.o
+
+# ipset core
+obj-$(CONFIG_IP_SET) += ip_set.o
+
+# bitmap types
+obj-$(CONFIG_IP_SET_BITMAP_IP) += ip_set_bitmap_ip.o
+obj-$(CONFIG_IP_SET_BITMAP_IPMAC) += ip_set_bitmap_ipmac.o
+obj-$(CONFIG_IP_SET_BITMAP_PORT) += ip_set_bitmap_port.o
+
+# hash types
+obj-$(CONFIG_IP_SET_HASH_IP) += ip_set_hash_ip.o
+obj-$(CONFIG_IP_SET_HASH_IPPORT) += ip_set_hash_ipport.o
+obj-$(CONFIG_IP_SET_HASH_IPPORTIP) += ip_set_hash_ipportip.o
+obj-$(CONFIG_IP_SET_HASH_IPPORTNET) += ip_set_hash_ipportnet.o
+obj-$(CONFIG_IP_SET_HASH_NET) += ip_set_hash_net.o
+obj-$(CONFIG_IP_SET_HASH_NETPORT) += ip_set_hash_netport.o
+
+# list types
+obj-$(CONFIG_IP_SET_LIST_SET) += ip_set_list_set.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
new file mode 100644
index 0000000..bca9699
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -0,0 +1,587 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ *                         Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:ip type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/spinlock.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+#define IP_SET_BITMAP_TIMEOUT
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:ip type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:ip");
+
+/* Type structure */
+struct bitmap_ip {
+	void *members;		/* the set members */
+	u32 first_ip;		/* host byte order, included in range */
+	u32 last_ip;		/* host byte order, included in range */
+	u32 elements;		/* number of max elements in the set */
+	u32 hosts;		/* number of hosts in a subnet */
+	size_t memsize;		/* members size */
+	u8 netmask;		/* subnet netmask */
+	u32 timeout;		/* timeout parameter */
+	struct timer_list gc;	/* garbage collection */
+};
+
+/* Base variant */
+
+static inline u32
+ip_to_id(const struct bitmap_ip *m, u32 ip)
+{
+	return ((ip & ip_set_hostmask(m->netmask)) - m->first_ip)/m->hosts;
+}
+
+static int
+bitmap_ip_test(struct ip_set *set, void *value, u32 timeout)
+{
+	const struct bitmap_ip *map = set->data;
+	u16 id = *(u16 *)value;
+
+	return !!test_bit(id, map->members);
+}
+
+static int
+bitmap_ip_add(struct ip_set *set, void *value, u32 timeout)
+{
+	struct bitmap_ip *map = set->data;
+	u16 id = *(u16 *)value;
+
+	if (test_and_set_bit(id, map->members))
+		return -IPSET_ERR_EXIST;
+
+	return 0;
+}
+
+static int
+bitmap_ip_del(struct ip_set *set, void *value, u32 timeout)
+{
+	struct bitmap_ip *map = set->data;
+	u16 id = *(u16 *)value;
+
+	if (!test_and_clear_bit(id, map->members))
+		return -IPSET_ERR_EXIST;
+
+	return 0;
+}
+
+static int
+bitmap_ip_list(const struct ip_set *set,
+	       struct sk_buff *skb, struct netlink_callback *cb)
+{
+	const struct bitmap_ip *map = set->data;
+	struct nlattr *atd, *nested;
+	u32 id, first = cb->args[2];
+
+	atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+	if (!atd)
+		return -EMSGSIZE;
+	for (; cb->args[2] < map->elements; cb->args[2]++) {
+		id = cb->args[2];
+		if (!test_bit(id, map->members))
+			continue;
+		nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+		if (!nested) {
+			if (id == first) {
+				nla_nest_cancel(skb, atd);
+				return -EMSGSIZE;
+			} else
+				goto nla_put_failure;
+		}
+		NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+				htonl(map->first_ip + id * map->hosts));
+		ipset_nest_end(skb, nested);
+	}
+	ipset_nest_end(skb, atd);
+	/* Set listing finished */
+	cb->args[2] = 0;
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nested);
+	ipset_nest_end(skb, atd);
+	if (unlikely(id == first)) {
+		cb->args[2] = 0;
+		return -EMSGSIZE;
+	}
+	return 0;
+}
+
+/* Timeout variant */
+
+static int
+bitmap_ip_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+	const struct bitmap_ip *map = set->data;
+	const unsigned long *members = map->members;
+	u16 id = *(u16 *)value;
+
+	return ip_set_timeout_test(members[id]);
+}
+
+static int
+bitmap_ip_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+	struct bitmap_ip *map = set->data;
+	unsigned long *members = map->members;
+	u16 id = *(u16 *)value;
+
+	if (ip_set_timeout_test(members[id]))
+		return -IPSET_ERR_EXIST;
+
+	members[id] = ip_set_timeout_set(timeout);
+
+	return 0;
+}
+
+static int
+bitmap_ip_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+	struct bitmap_ip *map = set->data;
+	unsigned long *members = map->members;
+	u16 id = *(u16 *)value;
+	int ret = -IPSET_ERR_EXIST;
+
+	if (ip_set_timeout_test(members[id]))
+		ret = 0;
+
+	members[id] = IPSET_ELEM_UNSET;
+	return ret;
+}
+
+static int
+bitmap_ip_tlist(const struct ip_set *set,
+		struct sk_buff *skb, struct netlink_callback *cb)
+{
+	const struct bitmap_ip *map = set->data;
+	struct nlattr *adt, *nested;
+	u32 id, first = cb->args[2];
+	const unsigned long *members = map->members;
+
+	adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
+	if (!adt)
+		return -EMSGSIZE;
+	for (; cb->args[2] < map->elements; cb->args[2]++) {
+		id = cb->args[2];
+		if (!ip_set_timeout_test(members[id]))
+			continue;
+		nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+		if (!nested) {
+			if (id == first) {
+				nla_nest_cancel(skb, adt);
+				return -EMSGSIZE;
+			} else
+				goto nla_put_failure;
+		}
+		NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+				htonl(map->first_ip + id * map->hosts));
+		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+			      htonl(ip_set_timeout_get(members[id])));
+		ipset_nest_end(skb, nested);
+	}
+	ipset_nest_end(skb, adt);
+
+	/* Set listing finished */
+	cb->args[2] = 0;
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nested);
+	ipset_nest_end(skb, adt);
+	if (unlikely(id == first)) {
+		cb->args[2] = 0;
+		return -EMSGSIZE;
+	}
+	return 0;
+}
+
+static int
+bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
+	       enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+	struct bitmap_ip *map = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	u32 ip;
+
+	ip = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
+	if (ip < map->first_ip || ip > map->last_ip)
+		return -IPSET_ERR_BITMAP_RANGE;
+
+	ip = ip_to_id(map, ip);
+
+	return adtfn(set, &ip, map->timeout);
+}
+
+static int
+bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
+	       enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+	struct bitmap_ip *map = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	u32 timeout = map->timeout;
+	u32 ip, ip_to, id;
+	int ret = 0;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+	if (ret)
+		return ret;
+
+	if (ip < map->first_ip || ip > map->last_ip)
+		return -IPSET_ERR_BITMAP_RANGE;
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(map->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	if (adt == IPSET_TEST) {
+		id = ip_to_id(map, ip);
+		return adtfn(set, &id, timeout);
+	}
+
+	if (tb[IPSET_ATTR_IP_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+		if (ret)
+			return ret;
+		if (ip > ip_to) {
+			swap(ip, ip_to);
+			if (ip < map->first_ip)
+				return -IPSET_ERR_BITMAP_RANGE;
+		}
+	} else if (tb[IPSET_ATTR_CIDR]) {
+		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+		if (cidr > 32)
+			return -IPSET_ERR_INVALID_CIDR;
+		ip &= ip_set_hostmask(cidr);
+		ip_to = ip | ~ip_set_hostmask(cidr);
+	} else
+		ip_to = ip;
+
+	if (ip_to > map->last_ip)
+		return -IPSET_ERR_BITMAP_RANGE;
+
+	for (; !before(ip_to, ip); ip += map->hosts) {
+		id = ip_to_id(map, ip);
+		ret = adtfn(set, &id, timeout);;
+
+		if (ret && !ip_set_eexist(ret, flags))
+			return ret;
+		else
+			ret = 0;
+	}
+	return ret;
+}
+
+static void
+bitmap_ip_destroy(struct ip_set *set)
+{
+	struct bitmap_ip *map = set->data;
+
+	if (with_timeout(map->timeout))
+		del_timer_sync(&map->gc);
+
+	ip_set_free(map->members);
+	kfree(map);
+
+	set->data = NULL;
+}
+
+static void
+bitmap_ip_flush(struct ip_set *set)
+{
+	struct bitmap_ip *map = set->data;
+
+	memset(map->members, 0, map->memsize);
+}
+
+static int
+bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
+{
+	const struct bitmap_ip *map = set->data;
+	struct nlattr *nested;
+
+	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+	if (!nested)
+		goto nla_put_failure;
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
+	if (map->netmask != 32)
+		NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+		      htonl(sizeof(*map) + map->memsize));
+	if (with_timeout(map->timeout))
+		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+	ipset_nest_end(skb, nested);
+
+	return 0;
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static bool
+bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+	const struct bitmap_ip *x = a->data;
+	const struct bitmap_ip *y = b->data;
+
+	return x->first_ip == y->first_ip &&
+	       x->last_ip == y->last_ip &&
+	       x->netmask == y->netmask &&
+	       x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_ip = {
+	.kadt	= bitmap_ip_kadt,
+	.uadt	= bitmap_ip_uadt,
+	.adt	= {
+		[IPSET_ADD] = bitmap_ip_add,
+		[IPSET_DEL] = bitmap_ip_del,
+		[IPSET_TEST] = bitmap_ip_test,
+	},
+	.destroy = bitmap_ip_destroy,
+	.flush	= bitmap_ip_flush,
+	.head	= bitmap_ip_head,
+	.list	= bitmap_ip_list,
+	.same_set = bitmap_ip_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tip = {
+	.kadt	= bitmap_ip_kadt,
+	.uadt	= bitmap_ip_uadt,
+	.adt	= {
+		[IPSET_ADD] = bitmap_ip_tadd,
+		[IPSET_DEL] = bitmap_ip_tdel,
+		[IPSET_TEST] = bitmap_ip_ttest,
+	},
+	.destroy = bitmap_ip_destroy,
+	.flush	= bitmap_ip_flush,
+	.head	= bitmap_ip_head,
+	.list	= bitmap_ip_tlist,
+	.same_set = bitmap_ip_same_set,
+};
+
+static void
+bitmap_ip_gc(unsigned long ul_set)
+{
+	struct ip_set *set = (struct ip_set *) ul_set;
+	struct bitmap_ip *map = set->data;
+	unsigned long *table = map->members;
+	u32 id;
+
+	/* We run in parallel with other readers (test element),
+	 * but adding/deleting new entries is locked out */
+	read_lock_bh(&set->lock);
+	for (id = 0; id < map->elements; id++)
+		if (ip_set_timeout_expired(table[id]))
+			table[id] = IPSET_ELEM_UNSET;
+	read_unlock_bh(&set->lock);
+
+	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+	add_timer(&map->gc);
+}
+
+static void
+bitmap_ip_gc_init(struct ip_set *set)
+{
+	struct bitmap_ip *map = set->data;
+
+	init_timer(&map->gc);
+	map->gc.data = (unsigned long) set;
+	map->gc.function = bitmap_ip_gc;
+	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+	add_timer(&map->gc);
+}
+
+/* Create bitmap:ip type of sets */
+
+static bool
+init_map_ip(struct ip_set *set, struct bitmap_ip *map,
+	    u32 first_ip, u32 last_ip,
+	    u32 elements, u32 hosts, u8 netmask)
+{
+	map->members = ip_set_alloc(map->memsize);
+	if (!map->members)
+		return false;
+	map->first_ip = first_ip;
+	map->last_ip = last_ip;
+	map->elements = elements;
+	map->hosts = hosts;
+	map->netmask = netmask;
+	map->timeout = IPSET_NO_TIMEOUT;
+
+	set->data = map;
+	set->family = AF_INET;
+
+	return true;
+}
+
+static int
+bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+	struct bitmap_ip *map;
+	u32 first_ip, last_ip, hosts, elements;
+	u8 netmask = 32;
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_IP_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
+		if (ret)
+			return ret;
+		if (first_ip > last_ip) {
+			u32 tmp = first_ip;
+
+			first_ip = last_ip;
+			last_ip = tmp;
+		}
+	} else if (tb[IPSET_ATTR_CIDR]) {
+		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+		if (cidr >= 32)
+			return -IPSET_ERR_INVALID_CIDR;
+		last_ip = first_ip | ~ip_set_hostmask(cidr);
+	} else
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_NETMASK]) {
+		netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
+
+		if (netmask > 32)
+			return -IPSET_ERR_INVALID_NETMASK;
+
+		first_ip &= ip_set_hostmask(netmask);
+		last_ip |= ~ip_set_hostmask(netmask);
+	}
+
+	if (netmask == 32) {
+		hosts = 1;
+		elements = last_ip - first_ip + 1;
+	} else {
+		u8 mask_bits;
+		u32 mask;
+
+		mask = range_to_mask(first_ip, last_ip, &mask_bits);
+
+		if ((!mask && (first_ip || last_ip != 0xFFFFFFFF)) ||
+		    netmask <= mask_bits)
+			return -IPSET_ERR_BITMAP_RANGE;
+
+		pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask);
+		hosts = 2 << (32 - netmask - 1);
+		elements = 2 << (netmask - mask_bits - 1);
+	}
+	if (elements > IPSET_BITMAP_MAX_RANGE + 1)
+		return -IPSET_ERR_BITMAP_RANGE_SIZE;
+
+	pr_debug("hosts %u, elements %u\n", hosts, elements);
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (!map)
+		return -ENOMEM;
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		map->memsize = elements * sizeof(unsigned long);
+
+		if (!init_map_ip(set, map, first_ip, last_ip,
+				 elements, hosts, netmask)) {
+			kfree(map);
+			return -ENOMEM;
+		}
+
+		map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+		set->variant = &bitmap_tip;
+
+		bitmap_ip_gc_init(set);
+	} else {
+		map->memsize = bitmap_bytes(0, elements - 1);
+
+		if (!init_map_ip(set, map, first_ip, last_ip,
+				 elements, hosts, netmask)) {
+			kfree(map);
+			return -ENOMEM;
+		}
+
+		set->variant = &bitmap_ip;
+	}
+	return 0;
+}
+
+static struct ip_set_type bitmap_ip_type __read_mostly = {
+	.name		= "bitmap:ip",
+	.protocol	= IPSET_PROTOCOL,
+	.features	= IPSET_TYPE_IP,
+	.dimension	= IPSET_DIM_ONE,
+	.family		= AF_INET,
+	.revision	= 0,
+	.create		= bitmap_ip_create,
+	.create_policy	= {
+		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
+		[IPSET_ATTR_NETMASK]	= { .type = NLA_U8  },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+	},
+	.adt_policy	= {
+		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init
+bitmap_ip_init(void)
+{
+	return ip_set_type_register(&bitmap_ip_type);
+}
+
+static void __exit
+bitmap_ip_fini(void)
+{
+	ip_set_type_unregister(&bitmap_ip_type);
+}
+
+module_init(bitmap_ip_init);
+module_exit(bitmap_ip_fini);
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
new file mode 100644
index 0000000..5e79017
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -0,0 +1,652 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ *                         Patrick Schaaf <bof@bof.de>
+ *			   Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:ip,mac type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:ip,mac type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:ip,mac");
+
+enum {
+	MAC_EMPTY,		/* element is not set */
+	MAC_FILLED,		/* element is set with MAC */
+	MAC_UNSET,		/* element is set, without MAC */
+};
+
+/* Type structure */
+struct bitmap_ipmac {
+	void *members;		/* the set members */
+	u32 first_ip;		/* host byte order, included in range */
+	u32 last_ip;		/* host byte order, included in range */
+	u32 timeout;		/* timeout value */
+	struct timer_list gc;	/* garbage collector */
+	size_t dsize;		/* size of element */
+};
+
+/* ADT structure for generic function args */
+struct ipmac {
+	u32 id;			/* id in array */
+	unsigned char *ether;	/* ethernet address */
+};
+
+/* Member element without and with timeout */
+
+struct ipmac_elem {
+	unsigned char ether[ETH_ALEN];
+	unsigned char match;
+} __attribute__ ((aligned));
+
+struct ipmac_telem {
+	unsigned char ether[ETH_ALEN];
+	unsigned char match;
+	unsigned long timeout;
+} __attribute__ ((aligned));
+
+static inline void *
+bitmap_ipmac_elem(const struct bitmap_ipmac *map, u32 id)
+{
+	return (void *)((char *)map->members + id * map->dsize);
+}
+
+static inline bool
+bitmap_timeout(const struct bitmap_ipmac *map, u32 id)
+{
+	const struct ipmac_telem *elem = bitmap_ipmac_elem(map, id);
+
+	return ip_set_timeout_test(elem->timeout);
+}
+
+static inline bool
+bitmap_expired(const struct bitmap_ipmac *map, u32 id)
+{
+	const struct ipmac_telem *elem = bitmap_ipmac_elem(map, id);
+
+	return ip_set_timeout_expired(elem->timeout);
+}
+
+static inline int
+bitmap_ipmac_exist(const struct ipmac_telem *elem)
+{
+	return elem->match == MAC_UNSET ||
+	       (elem->match == MAC_FILLED &&
+		!ip_set_timeout_expired(elem->timeout));
+}
+
+/* Base variant */
+
+static int
+bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout)
+{
+	const struct bitmap_ipmac *map = set->data;
+	const struct ipmac *data = value;
+	const struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+	switch (elem->match) {
+	case MAC_UNSET:
+		/* Trigger kernel to fill out the ethernet address */
+		return -EAGAIN;
+	case MAC_FILLED:
+		return data->ether == NULL ||
+		       compare_ether_addr(data->ether, elem->ether) == 0;
+	}
+	return 0;
+}
+
+static int
+bitmap_ipmac_add(struct ip_set *set, void *value, u32 timeout)
+{
+	struct bitmap_ipmac *map = set->data;
+	const struct ipmac *data = value;
+	struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+	switch (elem->match) {
+	case MAC_UNSET:
+		if (!data->ether)
+			/* Already added without ethernet address */
+			return -IPSET_ERR_EXIST;
+		/* Fill the MAC address */
+		memcpy(elem->ether, data->ether, ETH_ALEN);
+		elem->match = MAC_FILLED;
+		break;
+	case MAC_FILLED:
+		return -IPSET_ERR_EXIST;
+	case MAC_EMPTY:
+		if (data->ether) {
+			memcpy(elem->ether, data->ether, ETH_ALEN);
+			elem->match = MAC_FILLED;
+		} else
+			elem->match = MAC_UNSET;
+	}
+
+	return 0;
+}
+
+static int
+bitmap_ipmac_del(struct ip_set *set, void *value, u32 timeout)
+{
+	struct bitmap_ipmac *map = set->data;
+	const struct ipmac *data = value;
+	struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+	if (elem->match == MAC_EMPTY)
+		return -IPSET_ERR_EXIST;
+
+	elem->match = MAC_EMPTY;
+
+	return 0;
+}
+
+static int
+bitmap_ipmac_list(const struct ip_set *set,
+		  struct sk_buff *skb, struct netlink_callback *cb)
+{
+	const struct bitmap_ipmac *map = set->data;
+	const struct ipmac_elem *elem;
+	struct nlattr *atd, *nested;
+	u32 id, first = cb->args[2];
+	u32 last = map->last_ip - map->first_ip;
+
+	atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+	if (!atd)
+		return -EMSGSIZE;
+	for (; cb->args[2] <= last; cb->args[2]++) {
+		id = cb->args[2];
+		elem = bitmap_ipmac_elem(map, id);
+		if (elem->match == MAC_EMPTY)
+			continue;
+		nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+		if (!nested) {
+			if (id == first) {
+				nla_nest_cancel(skb, atd);
+				return -EMSGSIZE;
+			} else
+				goto nla_put_failure;
+		}
+		NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+				htonl(map->first_ip + id));
+		if (elem->match == MAC_FILLED)
+			NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+				elem->ether);
+		ipset_nest_end(skb, nested);
+	}
+	ipset_nest_end(skb, atd);
+	/* Set listing finished */
+	cb->args[2] = 0;
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nested);
+	ipset_nest_end(skb, atd);
+	if (unlikely(id == first)) {
+		cb->args[2] = 0;
+		return -EMSGSIZE;
+	}
+	return 0;
+}
+
+/* Timeout variant */
+
+static int
+bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+	const struct bitmap_ipmac *map = set->data;
+	const struct ipmac *data = value;
+	const struct ipmac_elem *elem = bitmap_ipmac_elem(map, data->id);
+
+	switch (elem->match) {
+	case MAC_UNSET:
+		/* Trigger kernel to fill out the ethernet address */
+		return -EAGAIN;
+	case MAC_FILLED:
+		return (data->ether == NULL ||
+			compare_ether_addr(data->ether, elem->ether) == 0) &&
+		       !bitmap_expired(map, data->id);
+	}
+	return 0;
+}
+
+static int
+bitmap_ipmac_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+	struct bitmap_ipmac *map = set->data;
+	const struct ipmac *data = value;
+	struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
+
+	switch (elem->match) {
+	case MAC_UNSET:
+		if (!data->ether)
+			/* Already added without ethernet address */
+			return -IPSET_ERR_EXIST;
+		/* Fill the MAC address and activate the timer */
+		memcpy(elem->ether, data->ether, ETH_ALEN);
+		elem->match = MAC_FILLED;
+		if (timeout == map->timeout)
+			/* Timeout was not specified, use the stored one */
+			timeout = elem->timeout;
+		elem->timeout = ip_set_timeout_set(timeout);
+		break;
+	case MAC_FILLED:
+		if (!bitmap_expired(map, data->id))
+			return -IPSET_ERR_EXIST;
+		/* Fall through */
+	case MAC_EMPTY:
+		if (data->ether) {
+			memcpy(elem->ether, data->ether, ETH_ALEN);
+			elem->match = MAC_FILLED;
+		} else
+			elem->match = MAC_UNSET;
+		/* If the MAC is still unset, store the plain timeout value,
+		 * because the timer is not started yet and we can reuse
+		 * the value later, when the MAC is filled in
+		 * (possibly by the kernel) */
+		elem->timeout = data->ether ? ip_set_timeout_set(timeout)
+					    : timeout;
+		break;
+	}
+
+	return 0;
+}
+
+static int
+bitmap_ipmac_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+	struct bitmap_ipmac *map = set->data;
+	const struct ipmac *data = value;
+	struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
+
+	if (elem->match == MAC_EMPTY || bitmap_expired(map, data->id))
+		return -IPSET_ERR_EXIST;
+
+	elem->match = MAC_EMPTY;
+
+	return 0;
+}
+
+static int
+bitmap_ipmac_tlist(const struct ip_set *set,
+		   struct sk_buff *skb, struct netlink_callback *cb)
+{
+	const struct bitmap_ipmac *map = set->data;
+	const struct ipmac_telem *elem;
+	struct nlattr *atd, *nested;
+	u32 id, first = cb->args[2];
+	u32 timeout, last = map->last_ip - map->first_ip;
+
+	atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+	if (!atd)
+		return -EMSGSIZE;
+	for (; cb->args[2] <= last; cb->args[2]++) {
+		id = cb->args[2];
+		elem = bitmap_ipmac_elem(map, id);
+		if (!bitmap_ipmac_exist(elem))
+			continue;
+		nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+		if (!nested) {
+			if (id == first) {
+				nla_nest_cancel(skb, atd);
+				return -EMSGSIZE;
+			} else
+				goto nla_put_failure;
+		}
+		NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP,
+				htonl(map->first_ip + id));
+		if (elem->match == MAC_FILLED)
+			NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN,
+				elem->ether);
+		timeout = elem->match == MAC_UNSET ? elem->timeout
+				: ip_set_timeout_get(elem->timeout);
+		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout));
+		ipset_nest_end(skb, nested);
+	}
+	ipset_nest_end(skb, atd);
+	/* Set listing finished */
+	cb->args[2] = 0;
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nested);
+	ipset_nest_end(skb, atd);
+	return -EMSGSIZE;
+}
+
+static int
+bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
+		  enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+	struct bitmap_ipmac *map = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct ipmac data;
+
+	data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
+	if (data.id < map->first_ip || data.id > map->last_ip)
+		return -IPSET_ERR_BITMAP_RANGE;
+
+	/* Backward compatibility: we don't check the second flag */
+	if (skb_mac_header(skb) < skb->head ||
+	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
+		return -EINVAL;
+
+	data.id -= map->first_ip;
+	data.ether = eth_hdr(skb)->h_source;
+
+	return adtfn(set, &data, map->timeout);
+}
+
+static int
+bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
+		  enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+	const struct bitmap_ipmac *map = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct ipmac data;
+	u32 timeout = map->timeout;
+	int ret = 0;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &data.id);
+	if (ret)
+		return ret;
+
+	if (data.id < map->first_ip || data.id > map->last_ip)
+		return -IPSET_ERR_BITMAP_RANGE;
+
+	if (tb[IPSET_ATTR_ETHER])
+		data.ether = nla_data(tb[IPSET_ATTR_ETHER]);
+	else
+		data.ether = NULL;
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(map->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	data.id -= map->first_ip;
+
+	ret = adtfn(set, &data, timeout);
+
+	return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static void
+bitmap_ipmac_destroy(struct ip_set *set)
+{
+	struct bitmap_ipmac *map = set->data;
+
+	if (with_timeout(map->timeout))
+		del_timer_sync(&map->gc);
+
+	ip_set_free(map->members);
+	kfree(map);
+
+	set->data = NULL;
+}
+
+static void
+bitmap_ipmac_flush(struct ip_set *set)
+{
+	struct bitmap_ipmac *map = set->data;
+
+	memset(map->members, 0,
+	       (map->last_ip - map->first_ip + 1) * map->dsize);
+}
+
+static int
+bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
+{
+	const struct bitmap_ipmac *map = set->data;
+	struct nlattr *nested;
+
+	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+	if (!nested)
+		goto nla_put_failure;
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+		      htonl(sizeof(*map)
+			    + (map->last_ip - map->first_ip + 1) * map->dsize));
+	if (with_timeout(map->timeout))
+		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+	ipset_nest_end(skb, nested);
+
+	return 0;
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static bool
+bitmap_ipmac_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+	const struct bitmap_ipmac *x = a->data;
+	const struct bitmap_ipmac *y = b->data;
+
+	return x->first_ip == y->first_ip &&
+	       x->last_ip == y->last_ip &&
+	       x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_ipmac = {
+	.kadt	= bitmap_ipmac_kadt,
+	.uadt	= bitmap_ipmac_uadt,
+	.adt	= {
+		[IPSET_ADD] = bitmap_ipmac_add,
+		[IPSET_DEL] = bitmap_ipmac_del,
+		[IPSET_TEST] = bitmap_ipmac_test,
+	},
+	.destroy = bitmap_ipmac_destroy,
+	.flush	= bitmap_ipmac_flush,
+	.head	= bitmap_ipmac_head,
+	.list	= bitmap_ipmac_list,
+	.same_set = bitmap_ipmac_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tipmac = {
+	.kadt	= bitmap_ipmac_kadt,
+	.uadt	= bitmap_ipmac_uadt,
+	.adt	= {
+		[IPSET_ADD] = bitmap_ipmac_tadd,
+		[IPSET_DEL] = bitmap_ipmac_tdel,
+		[IPSET_TEST] = bitmap_ipmac_ttest,
+	},
+	.destroy = bitmap_ipmac_destroy,
+	.flush	= bitmap_ipmac_flush,
+	.head	= bitmap_ipmac_head,
+	.list	= bitmap_ipmac_tlist,
+	.same_set = bitmap_ipmac_same_set,
+};
+
+static void
+bitmap_ipmac_gc(unsigned long ul_set)
+{
+	struct ip_set *set = (struct ip_set *) ul_set;
+	struct bitmap_ipmac *map = set->data;
+	struct ipmac_telem *elem;
+	u32 id, last = map->last_ip - map->first_ip;
+
+	/* We run in parallel with other readers (test element)
+	 * but adding/deleting new entries is locked out */
+	read_lock_bh(&set->lock);
+	for (id = 0; id <= last; id++) {
+		elem = bitmap_ipmac_elem(map, id);
+		if (elem->match == MAC_FILLED &&
+		    ip_set_timeout_expired(elem->timeout))
+			elem->match = MAC_EMPTY;
+	}
+	read_unlock_bh(&set->lock);
+
+	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+	add_timer(&map->gc);
+}
+
+static void
+bitmap_ipmac_gc_init(struct ip_set *set)
+{
+	struct bitmap_ipmac *map = set->data;
+
+	init_timer(&map->gc);
+	map->gc.data = (unsigned long) set;
+	map->gc.function = bitmap_ipmac_gc;
+	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+	add_timer(&map->gc);
+}
+
+/* Create bitmap:ip,mac type of sets */
+
+static bool
+init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
+	       u32 first_ip, u32 last_ip)
+{
+	map->members = ip_set_alloc((last_ip - first_ip + 1) * map->dsize);
+	if (!map->members)
+		return false;
+	map->first_ip = first_ip;
+	map->last_ip = last_ip;
+	map->timeout = IPSET_NO_TIMEOUT;
+
+	set->data = map;
+	set->family = AF_INET;
+
+	return true;
+}
+
+static int
+bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
+		    u32 flags)
+{
+	u32 first_ip, last_ip, elements;
+	struct bitmap_ipmac *map;
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_IP_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
+		if (ret)
+			return ret;
+		if (first_ip > last_ip) {
+			u32 tmp = first_ip;
+
+			first_ip = last_ip;
+			last_ip = tmp;
+		}
+	} else if (tb[IPSET_ATTR_CIDR]) {
+		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+		if (cidr >= 32)
+			return -IPSET_ERR_INVALID_CIDR;
+		last_ip = first_ip | ~ip_set_hostmask(cidr);
+	} else
+		return -IPSET_ERR_PROTOCOL;
+
+	elements = last_ip - first_ip + 1;
+
+	if (elements > IPSET_BITMAP_MAX_RANGE + 1)
+		return -IPSET_ERR_BITMAP_RANGE_SIZE;
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (!map)
+		return -ENOMEM;
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		map->dsize = sizeof(struct ipmac_telem);
+
+		if (!init_map_ipmac(set, map, first_ip, last_ip)) {
+			kfree(map);
+			return -ENOMEM;
+		}
+
+		map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+		set->variant = &bitmap_tipmac;
+
+		bitmap_ipmac_gc_init(set);
+	} else {
+		map->dsize = sizeof(struct ipmac_elem);
+
+		if (!init_map_ipmac(set, map, first_ip, last_ip)) {
+			kfree(map);
+			return -ENOMEM;
+		}
+		set->variant = &bitmap_ipmac;
+
+	}
+	return 0;
+}
+
+static struct ip_set_type bitmap_ipmac_type = {
+	.name		= "bitmap:ip,mac",
+	.protocol	= IPSET_PROTOCOL,
+	.features	= IPSET_TYPE_IP | IPSET_TYPE_MAC,
+	.dimension	= IPSET_DIM_TWO,
+	.family		= AF_INET,
+	.revision	= 0,
+	.create		= bitmap_ipmac_create,
+	.create_policy	= {
+		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+	},
+	.adt_policy	= {
+		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_ETHER]	= { .type = NLA_BINARY, .len  = ETH_ALEN },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init
+bitmap_ipmac_init(void)
+{
+	return ip_set_type_register(&bitmap_ipmac_type);
+}
+
+static void __exit
+bitmap_ipmac_fini(void)
+{
+	ip_set_type_unregister(&bitmap_ipmac_type);
+}
+
+module_init(bitmap_ipmac_init);
+module_exit(bitmap_ipmac_fini);
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
new file mode 100644
index 0000000..165f09b
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -0,0 +1,515 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the bitmap:port type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/netlink.h>
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_bitmap.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#define IP_SET_BITMAP_TIMEOUT
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("bitmap:port type of IP sets");
+MODULE_ALIAS("ip_set_bitmap:port");
+
+/* Type structure */
+struct bitmap_port {
+	void *members;		/* the set members */
+	u16 first_port;		/* host byte order, included in range */
+	u16 last_port;		/* host byte order, included in range */
+	size_t memsize;		/* members size */
+	u32 timeout;		/* timeout parameter */
+	struct timer_list gc;	/* garbage collection */
+};
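+
+/*
+ * Storage note: depending on whether the set was created with a timeout,
+ * "members" is either a plain bitmap (one bit per port, base variant below)
+ * or an array of unsigned long timeout values (one slot per port, timeout
+ * variant); memsize is set up accordingly in bitmap_port_create().
+ */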
+
+/* Base variant */
+
+static int
+bitmap_port_test(struct ip_set *set, void *value, u32 timeout)
+{
+	const struct bitmap_port *map = set->data;
+	u16 id = *(u16 *)value;
+
+	return !!test_bit(id, map->members);
+}
+
+static int
+bitmap_port_add(struct ip_set *set, void *value, u32 timeout)
+{
+	struct bitmap_port *map = set->data;
+	u16 id = *(u16 *)value;
+
+	if (test_and_set_bit(id, map->members))
+		return -IPSET_ERR_EXIST;
+
+	return 0;
+}
+
+static int
+bitmap_port_del(struct ip_set *set, void *value, u32 timeout)
+{
+	struct bitmap_port *map = set->data;
+	u16 id = *(u16 *)value;
+
+	if (!test_and_clear_bit(id, map->members))
+		return -IPSET_ERR_EXIST;
+
+	return 0;
+}
+
+static int
+bitmap_port_list(const struct ip_set *set,
+		 struct sk_buff *skb, struct netlink_callback *cb)
+{
+	const struct bitmap_port *map = set->data;
+	struct nlattr *atd, *nested;
+	u16 id, first = cb->args[2];
+	u16 last = map->last_port - map->first_port;
+
+	atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+	if (!atd)
+		return -EMSGSIZE;
+	for (; cb->args[2] <= last; cb->args[2]++) {
+		id = cb->args[2];
+		if (!test_bit(id, map->members))
+			continue;
+		nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+		if (!nested) {
+			if (id == first) {
+				nla_nest_cancel(skb, atd);
+				return -EMSGSIZE;
+			} else
+				goto nla_put_failure;
+		}
+		NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
+			      htons(map->first_port + id));
+		ipset_nest_end(skb, nested);
+	}
+	ipset_nest_end(skb, atd);
+	/* Set listing finished */
+	cb->args[2] = 0;
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nested);
+	ipset_nest_end(skb, atd);
+	if (unlikely(id == first)) {
+		cb->args[2] = 0;
+		return -EMSGSIZE;
+	}
+	return 0;
+}
+
+/* Timeout variant */
+
+static int
+bitmap_port_ttest(struct ip_set *set, void *value, u32 timeout)
+{
+	const struct bitmap_port *map = set->data;
+	const unsigned long *members = map->members;
+	u16 id = *(u16 *)value;
+
+	return ip_set_timeout_test(members[id]);
+}
+
+static int
+bitmap_port_tadd(struct ip_set *set, void *value, u32 timeout)
+{
+	struct bitmap_port *map = set->data;
+	unsigned long *members = map->members;
+	u16 id = *(u16 *)value;
+
+	if (ip_set_timeout_test(members[id]))
+		return -IPSET_ERR_EXIST;
+
+	members[id] = ip_set_timeout_set(timeout);
+
+	return 0;
+}
+
+static int
+bitmap_port_tdel(struct ip_set *set, void *value, u32 timeout)
+{
+	struct bitmap_port *map = set->data;
+	unsigned long *members = map->members;
+	u16 id = *(u16 *)value;
+	int ret = -IPSET_ERR_EXIST;
+
+	if (ip_set_timeout_test(members[id]))
+		ret = 0;
+
+	members[id] = IPSET_ELEM_UNSET;
+	return ret;
+}
+
+static int
+bitmap_port_tlist(const struct ip_set *set,
+		  struct sk_buff *skb, struct netlink_callback *cb)
+{
+	const struct bitmap_port *map = set->data;
+	struct nlattr *adt, *nested;
+	u16 id, first = cb->args[2];
+	u16 last = map->last_port - map->first_port;
+	const unsigned long *members = map->members;
+
+	adt = ipset_nest_start(skb, IPSET_ATTR_ADT);
+	if (!adt)
+		return -EMSGSIZE;
+	for (; cb->args[2] <= last; cb->args[2]++) {
+		id = cb->args[2];
+		if (!ip_set_timeout_test(members[id]))
+			continue;
+		nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+		if (!nested) {
+			if (id == first) {
+				nla_nest_cancel(skb, adt);
+				return -EMSGSIZE;
+			} else
+				goto nla_put_failure;
+		}
+		NLA_PUT_NET16(skb, IPSET_ATTR_PORT,
+			      htons(map->first_port + id));
+		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+			      htonl(ip_set_timeout_get(members[id])));
+		ipset_nest_end(skb, nested);
+	}
+	ipset_nest_end(skb, adt);
+
+	/* Set listing finished */
+	cb->args[2] = 0;
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nested);
+	ipset_nest_end(skb, adt);
+	if (unlikely(id == first)) {
+		cb->args[2] = 0;
+		return -EMSGSIZE;
+	}
+	return 0;
+}
+
+static int
+bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
+		 enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+	struct bitmap_port *map = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	__be16 __port;
+	u16 port = 0;
+
+	if (!ip_set_get_ip_port(skb, pf, flags & IPSET_DIM_ONE_SRC, &__port))
+		return -EINVAL;
+
+	port = ntohs(__port);
+
+	if (port < map->first_port || port > map->last_port)
+		return -IPSET_ERR_BITMAP_RANGE;
+
+	port -= map->first_port;
+
+	return adtfn(set, &port, map->timeout);
+}
+
+static int
+bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
+		 enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+	struct bitmap_port *map = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	u32 timeout = map->timeout;
+	u32 port;	/* u32: avoid u16 wraparound in the loop below */
+	u16 id, port_to;
+	int ret = 0;
+
+	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
+	if (port < map->first_port || port > map->last_port)
+		return -IPSET_ERR_BITMAP_RANGE;
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(map->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	if (adt == IPSET_TEST) {
+		id = port - map->first_port;
+		return adtfn(set, &id, timeout);
+	}
+
+	if (tb[IPSET_ATTR_PORT_TO]) {
+		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+		if (port > port_to) {
+			swap(port, port_to);
+			if (port < map->first_port)
+				return -IPSET_ERR_BITMAP_RANGE;
+		}
+	} else
+		port_to = port;
+
+	if (port_to > map->last_port)
+		return -IPSET_ERR_BITMAP_RANGE;
+
+	for (; port <= port_to; port++) {
+		id = port - map->first_port;
+		ret = adtfn(set, &id, timeout);
+
+		if (ret && !ip_set_eexist(ret, flags))
+			return ret;
+		else
+			ret = 0;
+	}
+	return ret;
+}
+
+static void
+bitmap_port_destroy(struct ip_set *set)
+{
+	struct bitmap_port *map = set->data;
+
+	if (with_timeout(map->timeout))
+		del_timer_sync(&map->gc);
+
+	ip_set_free(map->members);
+	kfree(map);
+
+	set->data = NULL;
+}
+
+static void
+bitmap_port_flush(struct ip_set *set)
+{
+	struct bitmap_port *map = set->data;
+
+	memset(map->members, 0, map->memsize);
+}
+
+static int
+bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
+{
+	const struct bitmap_port *map = set->data;
+	struct nlattr *nested;
+
+	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+	if (!nested)
+		goto nla_put_failure;
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+		      htonl(sizeof(*map) + map->memsize));
+	if (with_timeout(map->timeout))
+		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+	ipset_nest_end(skb, nested);
+
+	return 0;
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static bool
+bitmap_port_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+	const struct bitmap_port *x = a->data;
+	const struct bitmap_port *y = b->data;
+
+	return x->first_port == y->first_port &&
+	       x->last_port == y->last_port &&
+	       x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant bitmap_port = {
+	.kadt	= bitmap_port_kadt,
+	.uadt	= bitmap_port_uadt,
+	.adt	= {
+		[IPSET_ADD] = bitmap_port_add,
+		[IPSET_DEL] = bitmap_port_del,
+		[IPSET_TEST] = bitmap_port_test,
+	},
+	.destroy = bitmap_port_destroy,
+	.flush	= bitmap_port_flush,
+	.head	= bitmap_port_head,
+	.list	= bitmap_port_list,
+	.same_set = bitmap_port_same_set,
+};
+
+static const struct ip_set_type_variant bitmap_tport = {
+	.kadt	= bitmap_port_kadt,
+	.uadt	= bitmap_port_uadt,
+	.adt	= {
+		[IPSET_ADD] = bitmap_port_tadd,
+		[IPSET_DEL] = bitmap_port_tdel,
+		[IPSET_TEST] = bitmap_port_ttest,
+	},
+	.destroy = bitmap_port_destroy,
+	.flush	= bitmap_port_flush,
+	.head	= bitmap_port_head,
+	.list	= bitmap_port_tlist,
+	.same_set = bitmap_port_same_set,
+};
+
+static void
+bitmap_port_gc(unsigned long ul_set)
+{
+	struct ip_set *set = (struct ip_set *) ul_set;
+	struct bitmap_port *map = set->data;
+	unsigned long *table = map->members;
+	u32 id;	/* u32: avoid u16 wraparound in the loop below */
+	u16 last = map->last_port - map->first_port;
+
+	/* We run in parallel with other readers (test element)
+	 * but adding/deleting new entries is locked out */
+	read_lock_bh(&set->lock);
+	for (id = 0; id <= last; id++)
+		if (ip_set_timeout_expired(table[id]))
+			table[id] = IPSET_ELEM_UNSET;
+	read_unlock_bh(&set->lock);
+
+	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+	add_timer(&map->gc);
+}
+
+static void
+bitmap_port_gc_init(struct ip_set *set)
+{
+	struct bitmap_port *map = set->data;
+
+	init_timer(&map->gc);
+	map->gc.data = (unsigned long) set;
+	map->gc.function = bitmap_port_gc;
+	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+	add_timer(&map->gc);
+}
+
+/* Create bitmap:port type of sets */
+
+static bool
+init_map_port(struct ip_set *set, struct bitmap_port *map,
+	      u16 first_port, u16 last_port)
+{
+	map->members = ip_set_alloc(map->memsize);
+	if (!map->members)
+		return false;
+	map->first_port = first_port;
+	map->last_port = last_port;
+	map->timeout = IPSET_NO_TIMEOUT;
+
+	set->data = map;
+	set->family = AF_UNSPEC;
+
+	return true;
+}
+
+static int
+bitmap_port_create(struct ip_set *set, struct nlattr *tb[],
+		 u32 flags)
+{
+	struct bitmap_port *map;
+	u16 first_port, last_port;
+
+	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	first_port = ip_set_get_h16(tb[IPSET_ATTR_PORT]);
+	last_port = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+	if (first_port > last_port) {
+		u16 tmp = first_port;
+
+		first_port = last_port;
+		last_port = tmp;
+	}
+
+	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	if (!map)
+		return -ENOMEM;
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		map->memsize = (last_port - first_port + 1)
+			       * sizeof(unsigned long);
+
+		if (!init_map_port(set, map, first_port, last_port)) {
+			kfree(map);
+			return -ENOMEM;
+		}
+
+		map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+		set->variant = &bitmap_tport;
+
+		bitmap_port_gc_init(set);
+	} else {
+		map->memsize = bitmap_bytes(0, last_port - first_port);
+		pr_debug("memsize: %zu\n", map->memsize);
+		if (!init_map_port(set, map, first_port, last_port)) {
+			kfree(map);
+			return -ENOMEM;
+		}
+
+		set->variant = &bitmap_port;
+	}
+	return 0;
+}
+
+static struct ip_set_type bitmap_port_type = {
+	.name		= "bitmap:port",
+	.protocol	= IPSET_PROTOCOL,
+	.features	= IPSET_TYPE_PORT,
+	.dimension	= IPSET_DIM_ONE,
+	.family		= AF_UNSPEC,
+	.revision	= 0,
+	.create		= bitmap_port_create,
+	.create_policy	= {
+		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
+		[IPSET_ATTR_PORT_TO]	= { .type = NLA_U16 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+	},
+	.adt_policy	= {
+		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
+		[IPSET_ATTR_PORT_TO]	= { .type = NLA_U16 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init
+bitmap_port_init(void)
+{
+	return ip_set_type_register(&bitmap_port_type);
+}
+
+static void __exit
+bitmap_port_fini(void)
+{
+	ip_set_type_unregister(&bitmap_port_type);
+}
+
+module_init(bitmap_port_init);
+module_exit(bitmap_port_fini);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
new file mode 100644
index 0000000..8b1a54c
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -0,0 +1,1671 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ *                         Patrick Schaaf <bof@bof.de>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module for IP set management */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/version.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/ipset/ip_set.h>
+
+static LIST_HEAD(ip_set_type_list);		/* all registered set types */
+static DEFINE_MUTEX(ip_set_type_mutex);		/* protects ip_set_type_list */
+
+static struct ip_set **ip_set_list;		/* all individual sets */
+static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
+
+#define STREQ(a, b)	(strncmp(a, b, IPSET_MAXNAMELEN) == 0)
+
+static unsigned int max_sets;
+
+module_param(max_sets, int, 0600);
+MODULE_PARM_DESC(max_sets, "maximal number of sets");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("core IP set support");
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
+
+/*
+ * The set types are implemented in modules and registered set types
+ * can be found in ip_set_type_list. Adding/deleting types is
+ * serialized by ip_set_type_mutex.
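+ *
+ * Readers (find_set_type_get() and find_set_type_minmax()) walk the list
+ * under rcu_read_lock() only, so lookups may run concurrently with
+ * registration; unregistration removes the entry with list_del_rcu() and
+ * then waits with synchronize_rcu() before the type can go away.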
+ */
+
+static inline void
+ip_set_type_lock(void)
+{
+	mutex_lock(&ip_set_type_mutex);
+}
+
+static inline void
+ip_set_type_unlock(void)
+{
+	mutex_unlock(&ip_set_type_mutex);
+}
+
+/* Register and deregister settype */
+
+static struct ip_set_type *
+find_set_type(const char *name, u8 family, u8 revision)
+{
+	struct ip_set_type *type;
+
+	list_for_each_entry_rcu(type, &ip_set_type_list, list)
+		if (STREQ(type->name, name) &&
+		    (type->family == family || type->family == AF_UNSPEC) &&
+		    type->revision == revision)
+			return type;
+	return NULL;
+}
+
+/* Unlock, try to load a set type module and lock again */
+static int
+try_to_load_type(const char *name)
+{
+	nfnl_unlock();
+	pr_debug("try to load ip_set_%s\n", name);
+	if (request_module("ip_set_%s", name) < 0) {
+		pr_warning("Can't find ip_set type %s\n", name);
+		nfnl_lock();
+		return -IPSET_ERR_FIND_TYPE;
+	}
+	nfnl_lock();
+	return -EAGAIN;
+}
+
+/* Find a set type and reference it */
+static int
+find_set_type_get(const char *name, u8 family, u8 revision,
+		  struct ip_set_type **found)
+{
+	rcu_read_lock();
+	*found = find_set_type(name, family, revision);
+	if (*found) {
+		int err = !try_module_get((*found)->me);
+		rcu_read_unlock();
+		return err ? -EFAULT : 0;
+	}
+	rcu_read_unlock();
+
+	return try_to_load_type(name);
+}
+
+/* Find a given set type by name and family.
+ * If found, the supported minimum and maximum revisions are
+ * filled out.
+ */
+static int
+find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max)
+{
+	struct ip_set_type *type;
+	bool found = false;
+
+	*min = 0xff;
+	*max = 0;
+	rcu_read_lock();
+	list_for_each_entry_rcu(type, &ip_set_type_list, list)
+		if (STREQ(type->name, name) &&
+		    (type->family == family || type->family == AF_UNSPEC)) {
+			found = true;
+			if (type->revision < *min)
+				*min = type->revision;
+			if (type->revision > *max)
+				*max = type->revision;
+		}
+	rcu_read_unlock();
+	if (found)
+		return 0;
+
+	return try_to_load_type(name);
+}
+
+#define family_name(f)	((f) == AF_INET ? "inet" : \
+			 (f) == AF_INET6 ? "inet6" : "any")
+
+/* Register a set type structure. The type is identified by
+ * the unique triple of name, family and revision.
+ */
+int
+ip_set_type_register(struct ip_set_type *type)
+{
+	int ret = 0;
+
+	if (type->protocol != IPSET_PROTOCOL) {
+		pr_warning("ip_set type %s, family %s, revision %u uses "
+			   "wrong protocol version %u (want %u)\n",
+			   type->name, family_name(type->family),
+			   type->revision, type->protocol, IPSET_PROTOCOL);
+		return -EINVAL;
+	}
+
+	ip_set_type_lock();
+	if (find_set_type(type->name, type->family, type->revision)) {
+		/* Duplicate! */
+		pr_warning("ip_set type %s, family %s, revision %u "
+			   "already registered!\n", type->name,
+			   family_name(type->family), type->revision);
+		ret = -EINVAL;
+		goto unlock;
+	}
+	list_add_rcu(&type->list, &ip_set_type_list);
+	pr_debug("type %s, family %s, revision %u registered.\n",
+		 type->name, family_name(type->family), type->revision);
+unlock:
+	ip_set_type_unlock();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_type_register);
+
+/* Unregister a set type. There's a small race with ip_set_create */
+void
+ip_set_type_unregister(struct ip_set_type *type)
+{
+	ip_set_type_lock();
+	if (!find_set_type(type->name, type->family, type->revision)) {
+		pr_warning("ip_set type %s, family %s, revision %u "
+			   "not registered\n", type->name,
+			   family_name(type->family), type->revision);
+		goto unlock;
+	}
+	list_del_rcu(&type->list);
+	pr_debug("type %s, family %s, revision %u unregistered.\n",
+		 type->name, family_name(type->family), type->revision);
+unlock:
+	ip_set_type_unlock();
+
+	synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(ip_set_type_unregister);
+
+/* Utility functions */
+void *
+ip_set_alloc(size_t size)
+{
+	void *members = NULL;
+
+	if (size < KMALLOC_MAX_SIZE)
+		members = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+
+	if (members) {
+		pr_debug("%p: allocated with kmalloc\n", members);
+		return members;
+	}
+
+	members = vzalloc(size);
+	if (!members)
+		return NULL;
+	pr_debug("%p: allocated with vmalloc\n", members);
+
+	return members;
+}
+EXPORT_SYMBOL_GPL(ip_set_alloc);
+
+void
+ip_set_free(void *members)
+{
+	pr_debug("%p: free with %s\n", members,
+		 is_vmalloc_addr(members) ? "vfree" : "kfree");
+	if (is_vmalloc_addr(members))
+		vfree(members);
+	else
+		kfree(members);
+}
+EXPORT_SYMBOL_GPL(ip_set_free);
+
+static inline bool
+flag_nested(const struct nlattr *nla)
+{
+	return nla->nla_type & NLA_F_NESTED;
+}
+
+static const struct nla_policy ipaddr_policy[IPSET_ATTR_IPADDR_MAX + 1] = {
+	[IPSET_ATTR_IPADDR_IPV4]	= { .type = NLA_U32 },
+	[IPSET_ATTR_IPADDR_IPV6]	= { .type = NLA_BINARY,
+					    .len = sizeof(struct in6_addr) },
+};
+
+int
+ip_set_get_ipaddr4(struct nlattr *nla,  __be32 *ipaddr)
+{
+	struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1];
+
+	if (unlikely(!flag_nested(nla)))
+		return -IPSET_ERR_PROTOCOL;
+	if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy))
+		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))
+		return -IPSET_ERR_PROTOCOL;
+
+	*ipaddr = nla_get_be32(tb[IPSET_ATTR_IPADDR_IPV4]);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ipaddr4);
+
+int
+ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
+{
+	struct nlattr *tb[IPSET_ATTR_IPADDR_MAX+1];
+
+	if (unlikely(!flag_nested(nla)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy))
+		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))
+		return -IPSET_ERR_PROTOCOL;
+
+	memcpy(ipaddr, nla_data(tb[IPSET_ATTR_IPADDR_IPV6]),
+		sizeof(struct in6_addr));
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
+
+/*
+ * Creating/destroying/renaming/swapping affect the existence and
+ * the properties of a set. All of these can be executed from userspace
+ * only, and they are serialized by the nfnl mutex, indirectly via
+ * nfnetlink.
+ *
+ * Sets are identified by their index in ip_set_list and the index
+ * is used by the external references (the set/SET netfilter modules).
+ *
+ * The set behind an index may only change via swapping, initiated
+ * from userspace.
+ */
+
+static inline void
+__ip_set_get(ip_set_id_t index)
+{
+	atomic_inc(&ip_set_list[index]->ref);
+}
+
+static inline void
+__ip_set_put(ip_set_id_t index)
+{
+	atomic_dec(&ip_set_list[index]->ref);
+}
+
+/*
+ * Add, del and test set entries from kernel.
+ *
+ * The set behind the index must exist and must be referenced
+ * so it can't be destroyed (or changed) under our feet.
+ */
+
+int
+ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
+	    u8 family, u8 dim, u8 flags)
+{
+	struct ip_set *set = ip_set_list[index];
+	int ret = 0;
+
+	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	pr_debug("set %s, index %u\n", set->name, index);
+
+	if (dim < set->type->dimension ||
+	    !(family == set->family || set->family == AF_UNSPEC))
+		return 0;
+
+	read_lock_bh(&set->lock);
+	ret = set->variant->kadt(set, skb, IPSET_TEST, family, dim, flags);
+	read_unlock_bh(&set->lock);
+
+	if (ret == -EAGAIN) {
+		/* Type requests element to be completed */
+		pr_debug("element must be completed, ADD is triggered\n");
+		write_lock_bh(&set->lock);
+		set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
+		write_unlock_bh(&set->lock);
+		ret = 1;
+	}
+
+	/* Convert error codes to nomatch */
+	return (ret < 0 ? 0 : ret);
+}
+EXPORT_SYMBOL_GPL(ip_set_test);
+
+int
+ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
+	   u8 family, u8 dim, u8 flags)
+{
+	struct ip_set *set = ip_set_list[index];
+	int ret;
+
+	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	pr_debug("set %s, index %u\n", set->name, index);
+
+	if (dim < set->type->dimension ||
+	    !(family == set->family || set->family == AF_UNSPEC))
+		return 0;
+
+	write_lock_bh(&set->lock);
+	ret = set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
+	write_unlock_bh(&set->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_add);
+
+int
+ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
+	   u8 family, u8 dim, u8 flags)
+{
+	struct ip_set *set = ip_set_list[index];
+	int ret = 0;
+
+	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	pr_debug("set %s, index %u\n", set->name, index);
+
+	if (dim < set->type->dimension ||
+	    !(family == set->family || set->family == AF_UNSPEC))
+		return 0;
+
+	write_lock_bh(&set->lock);
+	ret = set->variant->kadt(set, skb, IPSET_DEL, family, dim, flags);
+	write_unlock_bh(&set->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ip_set_del);
+
+/*
+ * Find a set by name and reference it once. The reference makes sure
+ * the thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex must already be held.
+ */
+ip_set_id_t
+ip_set_get_byname(const char *name, struct ip_set **set)
+{
+	ip_set_id_t i, index = IPSET_INVALID_ID;
+	struct ip_set *s;
+
+	for (i = 0; i < ip_set_max; i++) {
+		s = ip_set_list[i];
+		if (s != NULL && STREQ(s->name, name)) {
+			__ip_set_get(i);
+			index = i;
+			*set = s;
+		}
+	}
+
+	return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_get_byname);
+
+/*
+ * If the given index points to a valid set, decrement its
+ * reference count by one. The caller shall not assume the index
+ * to be valid after calling this function.
+ *
+ * The nfnl mutex must already be held.
+ */
+void
+ip_set_put_byindex(ip_set_id_t index)
+{
+	if (ip_set_list[index] != NULL) {
+		BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+		__ip_set_put(index);
+	}
+}
+EXPORT_SYMBOL_GPL(ip_set_put_byindex);
+
+/*
+ * Get the name of a set behind a set index.
+ * We assume the set is referenced, so it does exist and
+ * can't be destroyed. The set cannot be renamed due to
+ * the referencing either.
+ *
+ * The nfnl mutex must already be held.
+ */
+const char *
+ip_set_name_byindex(ip_set_id_t index)
+{
+	const struct ip_set *set = ip_set_list[index];
+
+	BUG_ON(set == NULL);
+	BUG_ON(atomic_read(&set->ref) == 0);
+
+	/* Referenced, so it's safe */
+	return set->name;
+}
+EXPORT_SYMBOL_GPL(ip_set_name_byindex);
+
+/*
+ * Routines to be called by external subsystems, which do not
+ * call nfnl_lock for us.
+ */
+
+/*
+ * Find a set by name and reference it once. The reference makes sure
+ * the thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex is acquired inside the function.
+ */
+ip_set_id_t
+ip_set_nfnl_get(const char *name)
+{
+	struct ip_set *s;
+	ip_set_id_t index;
+
+	nfnl_lock();
+	index = ip_set_get_byname(name, &s);
+	nfnl_unlock();
+
+	return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_get);
+
+/*
+ * Find a set by index and reference it once. The reference makes sure
+ * the thing pointed to does not go away under our feet.
+ *
+ * The nfnl mutex is acquired inside the function.
+ */
+ip_set_id_t
+ip_set_nfnl_get_byindex(ip_set_id_t index)
+{
+	if (index >= ip_set_max)
+		return IPSET_INVALID_ID;
+
+	nfnl_lock();
+	if (ip_set_list[index])
+		__ip_set_get(index);
+	else
+		index = IPSET_INVALID_ID;
+	nfnl_unlock();
+
+	return index;
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);
+
+/*
+ * If the given index points to a valid set, decrement its
+ * reference count by one. The caller shall not assume the index
+ * to be valid after calling this function.
+ *
+ * The nfnl mutex is acquired inside the function.
+ */
+void
+ip_set_nfnl_put(ip_set_id_t index)
+{
+	nfnl_lock();
+	if (ip_set_list[index] != NULL) {
+		BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+		__ip_set_put(index);
+	}
+	nfnl_unlock();
+}
+EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
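+
+/*
+ * A minimal usage sketch (not part of this file) of how an external module,
+ * e.g. a netfilter match extension, is expected to consume the API exported
+ * above; the function and variable names are illustrative only:
+ *
+ *	static ip_set_id_t example_index;
+ *
+ *	static int example_checkentry(const char *setname)
+ *	{
+ *		example_index = ip_set_nfnl_get(setname);
+ *		return example_index == IPSET_INVALID_ID ? -ENOENT : 0;
+ *	}
+ *
+ *	static bool example_match(const struct sk_buff *skb)
+ *	{
+ *		return ip_set_test(example_index, skb, AF_INET,
+ *				   IPSET_DIM_ONE, IPSET_DIM_ONE_SRC);
+ *	}
+ *
+ *	static void example_destroy(void)
+ *	{
+ *		ip_set_nfnl_put(example_index);
+ *	}
+ */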
+
+/*
+ * Communication protocol with userspace over netlink.
+ *
+ * We are already serialized by nfnl_lock.
+ */
+
+static inline bool
+protocol_failed(const struct nlattr * const tb[])
+{
+	return !tb[IPSET_ATTR_PROTOCOL] ||
+	       nla_get_u8(tb[IPSET_ATTR_PROTOCOL]) != IPSET_PROTOCOL;
+}
+
+static inline u32
+flag_exist(const struct nlmsghdr *nlh)
+{
+	return nlh->nlmsg_flags & NLM_F_EXCL ? 0 : IPSET_FLAG_EXIST;
+}
+
+static struct nlmsghdr *
+start_msg(struct sk_buff *skb, u32 pid, u32 seq, unsigned int flags,
+	  enum ipset_cmd cmd)
+{
+	struct nlmsghdr *nlh;
+	struct nfgenmsg *nfmsg;
+
+	nlh = nlmsg_put(skb, pid, seq, cmd | (NFNL_SUBSYS_IPSET << 8),
+			sizeof(*nfmsg), flags);
+	if (nlh == NULL)
+		return NULL;
+
+	nfmsg = nlmsg_data(nlh);
+	nfmsg->nfgen_family = AF_INET;
+	nfmsg->version = NFNETLINK_V0;
+	nfmsg->res_id = 0;
+
+	return nlh;
+}
+
+/* Create a set */
+
+static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = {
+	[IPSET_ATTR_PROTOCOL]	= { .type = NLA_U8 },
+	[IPSET_ATTR_SETNAME]	= { .type = NLA_NUL_STRING,
+				    .len = IPSET_MAXNAMELEN - 1 },
+	[IPSET_ATTR_TYPENAME]	= { .type = NLA_NUL_STRING,
+				    .len = IPSET_MAXNAMELEN - 1 },
+	[IPSET_ATTR_REVISION]	= { .type = NLA_U8 },
+	[IPSET_ATTR_FAMILY]	= { .type = NLA_U8 },
+	[IPSET_ATTR_DATA]	= { .type = NLA_NESTED },
+};
+
+static ip_set_id_t
+find_set_id(const char *name)
+{
+	ip_set_id_t i, index = IPSET_INVALID_ID;
+	const struct ip_set *set;
+
+	for (i = 0; index == IPSET_INVALID_ID && i < ip_set_max; i++) {
+		set = ip_set_list[i];
+		if (set != NULL && STREQ(set->name, name))
+			index = i;
+	}
+	return index;
+}
+
+static inline struct ip_set *
+find_set(const char *name)
+{
+	ip_set_id_t index = find_set_id(name);
+
+	return index == IPSET_INVALID_ID ? NULL : ip_set_list[index];
+}
+
+static int
+find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
+{
+	ip_set_id_t i;
+
+	*index = IPSET_INVALID_ID;
+	for (i = 0;  i < ip_set_max; i++) {
+		if (ip_set_list[i] == NULL) {
+			if (*index == IPSET_INVALID_ID)
+				*index = i;
+		} else if (STREQ(name, ip_set_list[i]->name)) {
+			/* Name clash */
+			*set = ip_set_list[i];
+			return -EEXIST;
+		}
+	}
+	if (*index == IPSET_INVALID_ID)
+		/* No free slot remained */
+		return -IPSET_ERR_MAX_SETS;
+	return 0;
+}
+
+static int
+ip_set_create(struct sock *ctnl, struct sk_buff *skb,
+	      const struct nlmsghdr *nlh,
+	      const struct nlattr * const attr[])
+{
+	struct ip_set *set, *clash;
+	ip_set_id_t index = IPSET_INVALID_ID;
+	struct nlattr *tb[IPSET_ATTR_CREATE_MAX+1] = {};
+	const char *name, *typename;
+	u8 family, revision;
+	u32 flags = flag_exist(nlh);
+	int ret = 0;
+
+	if (unlikely(protocol_failed(attr) ||
+		     attr[IPSET_ATTR_SETNAME] == NULL ||
+		     attr[IPSET_ATTR_TYPENAME] == NULL ||
+		     attr[IPSET_ATTR_REVISION] == NULL ||
+		     attr[IPSET_ATTR_FAMILY] == NULL ||
+		     (attr[IPSET_ATTR_DATA] != NULL &&
+		      !flag_nested(attr[IPSET_ATTR_DATA]))))
+		return -IPSET_ERR_PROTOCOL;
+
+	name = nla_data(attr[IPSET_ATTR_SETNAME]);
+	typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
+	family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
+	revision = nla_get_u8(attr[IPSET_ATTR_REVISION]);
+	pr_debug("setname: %s, typename: %s, family: %s, revision: %u\n",
+		 name, typename, family_name(family), revision);
+
+	/*
+	 * First, and without any locks, allocate and initialize
+	 * a normal base set structure.
+	 */
+	set = kzalloc(sizeof(struct ip_set), GFP_KERNEL);
+	if (!set)
+		return -ENOMEM;
+	rwlock_init(&set->lock);
+	strlcpy(set->name, name, IPSET_MAXNAMELEN);
+	atomic_set(&set->ref, 0);
+	set->family = family;
+
+	/*
+	 * Next, check that we know the type, and take
+	 * a reference on the type, to make sure it stays available
+	 * while constructing our new set.
+	 *
+	 * After referencing the type, we try to create the type
+	 * specific part of the set without holding any locks.
+	 */
+	ret = find_set_type_get(typename, family, revision, &(set->type));
+	if (ret)
+		goto out;
+
+	/*
+	 * Without holding any locks, create private part.
+	 */
+	if (attr[IPSET_ATTR_DATA] &&
+	    nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
+			     set->type->create_policy)) {
+		ret = -IPSET_ERR_PROTOCOL;
+		goto put_out;
+	}
+
+	ret = set->type->create(set, tb, flags);
+	if (ret != 0)
+		goto put_out;
+
+	/* BTW, ret==0 here. */
+
+	/*
+	 * Here, we have a valid, constructed set and we are protected
+	 * by nfnl_lock. Find the first free index in ip_set_list and
+	 * check clashing.
+	 */
+	ret = find_free_id(set->name, &index, &clash);
+	if (ret != 0) {
+		/* If this is the same set and requested, ignore error */
+		if (ret == -EEXIST &&
+		    (flags & IPSET_FLAG_EXIST) &&
+		    STREQ(set->type->name, clash->type->name) &&
+		    set->type->family == clash->type->family &&
+		    set->type->revision == clash->type->revision &&
+		    set->variant->same_set(set, clash))
+			ret = 0;
+		goto cleanup;
+	}
+
+	/*
+	 * Finally! Add our shiny new set to the list, and be done.
+	 */
+	pr_debug("create: '%s' created with index %u!\n", set->name, index);
+	ip_set_list[index] = set;
+
+	return ret;
+
+cleanup:
+	set->variant->destroy(set);
+put_out:
+	module_put(set->type->me);
+out:
+	kfree(set);
+	return ret;
+}
+
+/* Destroy sets */
+
+static const struct nla_policy
+ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
+	[IPSET_ATTR_PROTOCOL]	= { .type = NLA_U8 },
+	[IPSET_ATTR_SETNAME]	= { .type = NLA_NUL_STRING,
+				    .len = IPSET_MAXNAMELEN - 1 },
+};
+
+static void
+ip_set_destroy_set(ip_set_id_t index)
+{
+	struct ip_set *set = ip_set_list[index];
+
+	pr_debug("set: %s\n",  set->name);
+	ip_set_list[index] = NULL;
+
+	/* Must call it without holding any lock */
+	set->variant->destroy(set);
+	module_put(set->type->me);
+	kfree(set);
+}
+
+static int
+ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
+	       const struct nlmsghdr *nlh,
+	       const struct nlattr * const attr[])
+{
+	ip_set_id_t i;
+
+	if (unlikely(protocol_failed(attr)))
+		return -IPSET_ERR_PROTOCOL;
+
+	/* References are protected by the nfnl mutex */
+	if (!attr[IPSET_ATTR_SETNAME]) {
+		for (i = 0; i < ip_set_max; i++) {
+			if (ip_set_list[i] != NULL &&
+			    (atomic_read(&ip_set_list[i]->ref)))
+				return -IPSET_ERR_BUSY;
+		}
+		for (i = 0; i < ip_set_max; i++) {
+			if (ip_set_list[i] != NULL)
+				ip_set_destroy_set(i);
+		}
+	} else {
+		i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+		if (i == IPSET_INVALID_ID)
+			return -ENOENT;
+		else if (atomic_read(&ip_set_list[i]->ref))
+			return -IPSET_ERR_BUSY;
+
+		ip_set_destroy_set(i);
+	}
+	return 0;
+}
+
+/* Flush sets */
+
+static void
+ip_set_flush_set(struct ip_set *set)
+{
+	pr_debug("set: %s\n",  set->name);
+
+	write_lock_bh(&set->lock);
+	set->variant->flush(set);
+	write_unlock_bh(&set->lock);
+}
+
+static int
+ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
+	     const struct nlmsghdr *nlh,
+	     const struct nlattr * const attr[])
+{
+	ip_set_id_t i;
+
+	if (unlikely(protocol_failed(attr)))
+		return -EPROTO;
+
+	if (!attr[IPSET_ATTR_SETNAME]) {
+		for (i = 0; i < ip_set_max; i++)
+			if (ip_set_list[i] != NULL)
+				ip_set_flush_set(ip_set_list[i]);
+	} else {
+		i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+		if (i == IPSET_INVALID_ID)
+			return -ENOENT;
+
+		ip_set_flush_set(ip_set_list[i]);
+	}
+
+	return 0;
+}
+
+/* Rename a set */
+
+static const struct nla_policy
+ip_set_setname2_policy[IPSET_ATTR_CMD_MAX + 1] = {
+	[IPSET_ATTR_PROTOCOL]	= { .type = NLA_U8 },
+	[IPSET_ATTR_SETNAME]	= { .type = NLA_NUL_STRING,
+				    .len = IPSET_MAXNAMELEN - 1 },
+	[IPSET_ATTR_SETNAME2]	= { .type = NLA_NUL_STRING,
+				    .len = IPSET_MAXNAMELEN - 1 },
+};
+
+static int
+ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
+	      const struct nlmsghdr *nlh,
+	      const struct nlattr * const attr[])
+{
+	struct ip_set *set;
+	const char *name2;
+	ip_set_id_t i;
+
+	if (unlikely(protocol_failed(attr) ||
+		     attr[IPSET_ATTR_SETNAME] == NULL ||
+		     attr[IPSET_ATTR_SETNAME2] == NULL))
+		return -IPSET_ERR_PROTOCOL;
+
+	set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+	if (set == NULL)
+		return -ENOENT;
+	if (atomic_read(&set->ref) != 0)
+		return -IPSET_ERR_REFERENCED;
+
+	name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
+	for (i = 0; i < ip_set_max; i++) {
+		if (ip_set_list[i] != NULL &&
+		    STREQ(ip_set_list[i]->name, name2))
+			return -IPSET_ERR_EXIST_SETNAME2;
+	}
+	strncpy(set->name, name2, IPSET_MAXNAMELEN);
+
+	return 0;
+}
+
+/* Swap two sets so that name/index points to the other.
+ * References and set names are also swapped.
+ *
+ * We are protected by the nfnl mutex and references are
+ * manipulated only by holding the mutex. The kernel interfaces
+ * do not hold the mutex but the pointer settings are atomic
+ * so the ip_set_list always contains valid pointers to the sets.
+ */
+
+static int
+ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
+	    const struct nlmsghdr *nlh,
+	    const struct nlattr * const attr[])
+{
+	struct ip_set *from, *to;
+	ip_set_id_t from_id, to_id;
+	char from_name[IPSET_MAXNAMELEN];
+	u32 from_ref;
+
+	if (unlikely(protocol_failed(attr) ||
+		     attr[IPSET_ATTR_SETNAME] == NULL ||
+		     attr[IPSET_ATTR_SETNAME2] == NULL))
+		return -IPSET_ERR_PROTOCOL;
+
+	from_id = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+	if (from_id == IPSET_INVALID_ID)
+		return -ENOENT;
+
+	to_id = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME2]));
+	if (to_id == IPSET_INVALID_ID)
+		return -IPSET_ERR_EXIST_SETNAME2;
+
+	from = ip_set_list[from_id];
+	to = ip_set_list[to_id];
+
+	/* Features must not change.
+	 * Not an artificial restriction anymore, as we must prevent
+	 * possible loops created by swapping in setlist type of sets. */
+	if (!(from->type->features == to->type->features &&
+	      from->type->family == to->type->family))
+		return -IPSET_ERR_TYPE_MISMATCH;
+
+	/* No magic here: ref munging protected by the nfnl_lock */
+	strncpy(from_name, from->name, IPSET_MAXNAMELEN);
+	from_ref = atomic_read(&from->ref);
+
+	strncpy(from->name, to->name, IPSET_MAXNAMELEN);
+	atomic_set(&from->ref, atomic_read(&to->ref));
+	strncpy(to->name, from_name, IPSET_MAXNAMELEN);
+	atomic_set(&to->ref, from_ref);
+
+	ip_set_list[from_id] = to;
+	ip_set_list[to_id] = from;
+
+	return 0;
+}
+
+/* List/save set data */
+
+#define DUMP_INIT	0L
+#define DUMP_ALL	1L
+#define DUMP_ONE	2L
+#define DUMP_LAST	3L
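+
+/*
+ * Dump state kept across the netlink callbacks (see also dump_init()):
+ *   cb->args[0]: phase - DUMP_INIT on the first call, then either DUMP_ONE
+ *                for a single named set or DUMP_ALL for the ordinary sets,
+ *                and finally DUMP_LAST for the types flagged with
+ *                IPSET_DUMP_LAST, so that lists (unions of sets) come last;
+ *   cb->args[1]: index of the set currently being listed;
+ *   cb->args[2]: type specific cursor within the set, zero once the set
+ *                has been listed completely.
+ */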
+
+static int
+ip_set_dump_done(struct netlink_callback *cb)
+{
+	if (cb->args[2]) {
+		pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name);
+		__ip_set_put((ip_set_id_t) cb->args[1]);
+	}
+	return 0;
+}
+
+static inline void
+dump_attrs(struct nlmsghdr *nlh)
+{
+	const struct nlattr *attr;
+	int rem;
+
+	pr_debug("dump nlmsg\n");
+	nlmsg_for_each_attr(attr, nlh, sizeof(struct nfgenmsg), rem) {
+		pr_debug("type: %u, len %u\n", nla_type(attr), attr->nla_len);
+	}
+}
+
+static int
+dump_init(struct netlink_callback *cb)
+{
+	struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
+	int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
+	struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
+	struct nlattr *attr = (void *)nlh + min_len;
+	ip_set_id_t index;
+
+	/* Second pass, so parser can't fail */
+	nla_parse(cda, IPSET_ATTR_CMD_MAX,
+		  attr, nlh->nlmsg_len - min_len, ip_set_setname_policy);
+
+	/* cb->args[0] : dump single set/all sets
+	 *         [1] : set index
+	 *         [..]: type specific
+	 */
+
+	if (!cda[IPSET_ATTR_SETNAME]) {
+		cb->args[0] = DUMP_ALL;
+		return 0;
+	}
+
+	index = find_set_id(nla_data(cda[IPSET_ATTR_SETNAME]));
+	if (index == IPSET_INVALID_ID)
+		return -ENOENT;
+
+	cb->args[0] = DUMP_ONE;
+	cb->args[1] = index;
+	return 0;
+}
+
+static int
+ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	ip_set_id_t index = IPSET_INVALID_ID, max;
+	struct ip_set *set = NULL;
+	struct nlmsghdr *nlh = NULL;
+	unsigned int flags = NETLINK_CB(cb->skb).pid ? NLM_F_MULTI : 0;
+	int ret = 0;
+
+	if (cb->args[0] == DUMP_INIT) {
+		ret = dump_init(cb);
+		if (ret < 0) {
+			nlh = nlmsg_hdr(cb->skb);
+			/* We have to create and send the error message
+			 * manually :-( */
+			if (nlh->nlmsg_flags & NLM_F_ACK)
+				netlink_ack(cb->skb, nlh, ret);
+			return ret;
+		}
+	}
+
+	if (cb->args[1] >= ip_set_max)
+		goto out;
+
+	pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
+	max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+	for (; cb->args[1] < max; cb->args[1]++) {
+		index = (ip_set_id_t) cb->args[1];
+		set = ip_set_list[index];
+		if (set == NULL) {
+			if (cb->args[0] == DUMP_ONE) {
+				ret = -ENOENT;
+				goto out;
+			}
+			continue;
+		}
+		/* When dumping all sets, we must dump "sorted"
+		 * so that lists (unions of sets) are dumped last.
+		 */
+		if (cb->args[0] != DUMP_ONE &&
+		    !((cb->args[0] == DUMP_ALL) ^
+		      (set->type->features & IPSET_DUMP_LAST)))
+			continue;
+		pr_debug("List set: %s\n", set->name);
+		if (!cb->args[2]) {
+			/* Start listing: make sure set won't be destroyed */
+			pr_debug("reference set\n");
+			__ip_set_get(index);
+		}
+		nlh = start_msg(skb, NETLINK_CB(cb->skb).pid,
+				cb->nlh->nlmsg_seq, flags,
+				IPSET_CMD_LIST);
+		if (!nlh) {
+			ret = -EMSGSIZE;
+			goto release_refcount;
+		}
+		NLA_PUT_U8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+		NLA_PUT_STRING(skb, IPSET_ATTR_SETNAME, set->name);
+		switch (cb->args[2]) {
+		case 0:
+			/* Core header data */
+			NLA_PUT_STRING(skb, IPSET_ATTR_TYPENAME,
+				       set->type->name);
+			NLA_PUT_U8(skb, IPSET_ATTR_FAMILY,
+				   set->family);
+			NLA_PUT_U8(skb, IPSET_ATTR_REVISION,
+				   set->type->revision);
+			ret = set->variant->head(set, skb);
+			if (ret < 0)
+				goto release_refcount;
+			/* Fall through and add elements */
+		default:
+			read_lock_bh(&set->lock);
+			ret = set->variant->list(set, skb, cb);
+			read_unlock_bh(&set->lock);
+			if (!cb->args[2]) {
+				/* Set is done, proceed with next one */
+				if (cb->args[0] == DUMP_ONE)
+					cb->args[1] = IPSET_INVALID_ID;
+				else
+					cb->args[1]++;
+			}
+			goto release_refcount;
+		}
+	}
+	goto out;
+
+nla_put_failure:
+	ret = -EFAULT;
+release_refcount:
+	/* If there was an error or set is done, release set */
+	if (ret || !cb->args[2]) {
+		pr_debug("release set %s\n", ip_set_list[index]->name);
+		__ip_set_put(index);
+	}
+
+	/* If we dump all sets, continue with dumping last ones */
+	if (cb->args[0] == DUMP_ALL && cb->args[1] >= max && !cb->args[2])
+		cb->args[0] = DUMP_LAST;
+
+out:
+	if (nlh) {
+		nlmsg_end(skb, nlh);
+		pr_debug("nlmsg_len: %u\n", nlh->nlmsg_len);
+		dump_attrs(nlh);
+	}
+
+	return ret < 0 ? ret : skb->len;
+}
+
+static int
+ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
+	    const struct nlmsghdr *nlh,
+	    const struct nlattr * const attr[])
+{
+	if (unlikely(protocol_failed(attr)))
+		return -IPSET_ERR_PROTOCOL;
+
+	return netlink_dump_start(ctnl, skb, nlh,
+				  ip_set_dump_start,
+				  ip_set_dump_done);
+}
+
+/* Add, del and test */
+
+static const struct nla_policy ip_set_adt_policy[IPSET_ATTR_CMD_MAX + 1] = {
+	[IPSET_ATTR_PROTOCOL]	= { .type = NLA_U8 },
+	[IPSET_ATTR_SETNAME]	= { .type = NLA_NUL_STRING,
+				    .len = IPSET_MAXNAMELEN - 1 },
+	[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
+	[IPSET_ATTR_DATA]	= { .type = NLA_NESTED },
+	[IPSET_ATTR_ADT]	= { .type = NLA_NESTED },
+};
+
+static int
+call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
+	struct nlattr *tb[], enum ipset_adt adt,
+	u32 flags, bool use_lineno)
+{
+	int ret, retried = 0;
+	u32 lineno = 0;
+	bool eexist = flags & IPSET_FLAG_EXIST;
+
+	do {
+		write_lock_bh(&set->lock);
+		ret = set->variant->uadt(set, tb, adt, &lineno, flags);
+		write_unlock_bh(&set->lock);
+	} while (ret == -EAGAIN &&
+		 set->variant->resize &&
+		 (ret = set->variant->resize(set, retried++)) == 0);
+
+	if (!ret || (ret == -IPSET_ERR_EXIST && eexist))
+		return 0;
+	if (lineno && use_lineno) {
+		/* Error in restore/batch mode: send back lineno */
+		struct nlmsghdr *rep, *nlh = nlmsg_hdr(skb);
+		struct sk_buff *skb2;
+		struct nlmsgerr *errmsg;
+		size_t payload = sizeof(*errmsg) + nlmsg_len(nlh);
+		int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
+		struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
+		struct nlattr *cmdattr;
+		u32 *errline;
+
+		skb2 = nlmsg_new(payload, GFP_KERNEL);
+		if (skb2 == NULL)
+			return -ENOMEM;
+		rep = __nlmsg_put(skb2, NETLINK_CB(skb).pid,
+				  nlh->nlmsg_seq, NLMSG_ERROR, payload, 0);
+		errmsg = nlmsg_data(rep);
+		errmsg->error = ret;
+		memcpy(&errmsg->msg, nlh, nlh->nlmsg_len);
+		cmdattr = (void *)&errmsg->msg + min_len;
+
+		nla_parse(cda, IPSET_ATTR_CMD_MAX,
+			  cmdattr, nlh->nlmsg_len - min_len,
+			  ip_set_adt_policy);
+
+		errline = nla_data(cda[IPSET_ATTR_LINENO]);
+
+		*errline = lineno;
+
+		netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+		/* Signal netlink not to send its ACK/errmsg.  */
+		return -EINTR;
+	}
+
+	return ret;
+}
+
+static int
+ip_set_uadd(struct sock *ctnl, struct sk_buff *skb,
+	    const struct nlmsghdr *nlh,
+	    const struct nlattr * const attr[])
+{
+	struct ip_set *set;
+	struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+	const struct nlattr *nla;
+	u32 flags = flag_exist(nlh);
+	bool use_lineno;
+	int ret = 0;
+
+	if (unlikely(protocol_failed(attr) ||
+		     attr[IPSET_ATTR_SETNAME] == NULL ||
+		     !((attr[IPSET_ATTR_DATA] != NULL) ^
+		       (attr[IPSET_ATTR_ADT] != NULL)) ||
+		     (attr[IPSET_ATTR_DATA] != NULL &&
+		      !flag_nested(attr[IPSET_ATTR_DATA])) ||
+		     (attr[IPSET_ATTR_ADT] != NULL &&
+		      (!flag_nested(attr[IPSET_ATTR_ADT]) ||
+		       attr[IPSET_ATTR_LINENO] == NULL))))
+		return -IPSET_ERR_PROTOCOL;
+
+	set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+	if (set == NULL)
+		return -ENOENT;
+
+	use_lineno = !!attr[IPSET_ATTR_LINENO];
+	if (attr[IPSET_ATTR_DATA]) {
+		if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
+				     attr[IPSET_ATTR_DATA],
+				     set->type->adt_policy))
+			return -IPSET_ERR_PROTOCOL;
+		ret = call_ad(ctnl, skb, set, tb, IPSET_ADD, flags,
+			      use_lineno);
+	} else {
+		int nla_rem;
+
+		nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
+			memset(tb, 0, sizeof(tb));
+			if (nla_type(nla) != IPSET_ATTR_DATA ||
+			    !flag_nested(nla) ||
+			    nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
+					     set->type->adt_policy))
+				return -IPSET_ERR_PROTOCOL;
+			ret = call_ad(ctnl, skb, set, tb, IPSET_ADD,
+				      flags, use_lineno);
+			if (ret < 0)
+				return ret;
+		}
+	}
+	return ret;
+}
+
+static int
+ip_set_udel(struct sock *ctnl, struct sk_buff *skb,
+	    const struct nlmsghdr *nlh,
+	    const struct nlattr * const attr[])
+{
+	struct ip_set *set;
+	struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+	const struct nlattr *nla;
+	u32 flags = flag_exist(nlh);
+	bool use_lineno;
+	int ret = 0;
+
+	if (unlikely(protocol_failed(attr) ||
+		     attr[IPSET_ATTR_SETNAME] == NULL ||
+		     !((attr[IPSET_ATTR_DATA] != NULL) ^
+		       (attr[IPSET_ATTR_ADT] != NULL)) ||
+		     (attr[IPSET_ATTR_DATA] != NULL &&
+		      !flag_nested(attr[IPSET_ATTR_DATA])) ||
+		     (attr[IPSET_ATTR_ADT] != NULL &&
+		      (!flag_nested(attr[IPSET_ATTR_ADT]) ||
+		       attr[IPSET_ATTR_LINENO] == NULL))))
+		return -IPSET_ERR_PROTOCOL;
+
+	set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+	if (set == NULL)
+		return -ENOENT;
+
+	use_lineno = !!attr[IPSET_ATTR_LINENO];
+	if (attr[IPSET_ATTR_DATA]) {
+		if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
+				     attr[IPSET_ATTR_DATA],
+				     set->type->adt_policy))
+			return -IPSET_ERR_PROTOCOL;
+		ret = call_ad(ctnl, skb, set, tb, IPSET_DEL, flags,
+			      use_lineno);
+	} else {
+		int nla_rem;
+
+		nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
+			memset(tb, 0, sizeof(tb));
+			if (nla_type(nla) != IPSET_ATTR_DATA ||
+			    !flag_nested(nla) ||
+			    nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
+					     set->type->adt_policy))
+				return -IPSET_ERR_PROTOCOL;
+			ret = call_ad(ctnl, skb, set, tb, IPSET_DEL,
+				      flags, use_lineno);
+			if (ret < 0)
+				return ret;
+		}
+	}
+	return ret;
+}
+
+static int
+ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
+	     const struct nlmsghdr *nlh,
+	     const struct nlattr * const attr[])
+{
+	struct ip_set *set;
+	struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
+	int ret = 0;
+
+	if (unlikely(protocol_failed(attr) ||
+		     attr[IPSET_ATTR_SETNAME] == NULL ||
+		     attr[IPSET_ATTR_DATA] == NULL ||
+		     !flag_nested(attr[IPSET_ATTR_DATA])))
+		return -IPSET_ERR_PROTOCOL;
+
+	set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+	if (set == NULL)
+		return -ENOENT;
+
+	if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
+			     set->type->adt_policy))
+		return -IPSET_ERR_PROTOCOL;
+
+	read_lock_bh(&set->lock);
+	ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0);
+	read_unlock_bh(&set->lock);
+	/* Userspace can't trigger element to be re-added */
+	if (ret == -EAGAIN)
+		ret = 1;
+
+	return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST;
+}
+
+/* Get header data of a set */
+
+static int
+ip_set_header(struct sock *ctnl, struct sk_buff *skb,
+	      const struct nlmsghdr *nlh,
+	      const struct nlattr * const attr[])
+{
+	const struct ip_set *set;
+	struct sk_buff *skb2;
+	struct nlmsghdr *nlh2;
+	ip_set_id_t index;
+	int ret = 0;
+
+	if (unlikely(protocol_failed(attr) ||
+		     attr[IPSET_ATTR_SETNAME] == NULL))
+		return -IPSET_ERR_PROTOCOL;
+
+	index = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
+	if (index == IPSET_INVALID_ID)
+		return -ENOENT;
+	set = ip_set_list[index];
+
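+	/* Build a one-off reply and unicast it back to the requester. */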
+	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (skb2 == NULL)
+		return -ENOMEM;
+
+	nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+			 IPSET_CMD_HEADER);
+	if (!nlh2)
+		goto nlmsg_failure;
+	NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+	NLA_PUT_STRING(skb2, IPSET_ATTR_SETNAME, set->name);
+	NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, set->type->name);
+	NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, set->family);
+	NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->type->revision);
+	nlmsg_end(skb2, nlh2);
+
+	ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+
+nla_put_failure:
+	nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+	kfree_skb(skb2);
+	return -EMSGSIZE;
+}
+
+/* Get type data */
+
+static const struct nla_policy ip_set_type_policy[IPSET_ATTR_CMD_MAX + 1] = {
+	[IPSET_ATTR_PROTOCOL]	= { .type = NLA_U8 },
+	[IPSET_ATTR_TYPENAME]	= { .type = NLA_NUL_STRING,
+				    .len = IPSET_MAXNAMELEN - 1 },
+	[IPSET_ATTR_FAMILY]	= { .type = NLA_U8 },
+};
+
+static int
+ip_set_type(struct sock *ctnl, struct sk_buff *skb,
+	    const struct nlmsghdr *nlh,
+	    const struct nlattr * const attr[])
+{
+	struct sk_buff *skb2;
+	struct nlmsghdr *nlh2;
+	u8 family, min, max;
+	const char *typename;
+	int ret = 0;
+
+	if (unlikely(protocol_failed(attr) ||
+		     attr[IPSET_ATTR_TYPENAME] == NULL ||
+		     attr[IPSET_ATTR_FAMILY] == NULL))
+		return -IPSET_ERR_PROTOCOL;
+
+	family = nla_get_u8(attr[IPSET_ATTR_FAMILY]);
+	typename = nla_data(attr[IPSET_ATTR_TYPENAME]);
+	ret = find_set_type_minmax(typename, family, &min, &max);
+	if (ret)
+		return ret;
+
+	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (skb2 == NULL)
+		return -ENOMEM;
+
+	nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+			 IPSET_CMD_TYPE);
+	if (!nlh2)
+		goto nlmsg_failure;
+	NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+	NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, typename);
+	NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, family);
+	NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, max);
+	NLA_PUT_U8(skb2, IPSET_ATTR_REVISION_MIN, min);
+	nlmsg_end(skb2, nlh2);
+
+	pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
+	ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+
+nla_put_failure:
+	nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+	kfree_skb(skb2);
+	return -EMSGSIZE;
+}
+
+/* Get protocol version */
+
+static const struct nla_policy
+ip_set_protocol_policy[IPSET_ATTR_CMD_MAX + 1] = {
+	[IPSET_ATTR_PROTOCOL]	= { .type = NLA_U8 },
+};
+
+static int
+ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
+		const struct nlmsghdr *nlh,
+		const struct nlattr * const attr[])
+{
+	struct sk_buff *skb2;
+	struct nlmsghdr *nlh2;
+	int ret = 0;
+
+	if (unlikely(attr[IPSET_ATTR_PROTOCOL] == NULL))
+		return -IPSET_ERR_PROTOCOL;
+
+	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (skb2 == NULL)
+		return -ENOMEM;
+
+	nlh2 = start_msg(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0,
+			 IPSET_CMD_PROTOCOL);
+	if (!nlh2)
+		goto nlmsg_failure;
+	NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
+	nlmsg_end(skb2, nlh2);
+
+	ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+
+nla_put_failure:
+	nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+	kfree_skb(skb2);
+	return -EMSGSIZE;
+}
+
+static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
+	[IPSET_CMD_CREATE]	= {
+		.call		= ip_set_create,
+		.attr_count	= IPSET_ATTR_CMD_MAX,
+		.policy		= ip_set_create_policy,
+	},
+	[IPSET_CMD_DESTROY]	= {
+		.call		= ip_set_destroy,
+		.attr_count	= IPSET_ATTR_CMD_MAX,
+		.policy		= ip_set_setname_policy,
+	},
+	[IPSET_CMD_FLUSH]	= {
+		.call		= ip_set_flush,
+		.attr_count	= IPSET_ATTR_CMD_MAX,
+		.policy		= ip_set_setname_policy,
+	},
+	[IPSET_CMD_RENAME]	= {
+		.call		= ip_set_rename,
+		.attr_count	= IPSET_ATTR_CMD_MAX,
+		.policy		= ip_set_setname2_policy,
+	},
+	[IPSET_CMD_SWAP]	= {
+		.call		= ip_set_swap,
+		.attr_count	= IPSET_ATTR_CMD_MAX,
+		.policy		= ip_set_setname2_policy,
+	},
+	[IPSET_CMD_LIST]	= {
+		.call		= ip_set_dump,
+		.attr_count	= IPSET_ATTR_CMD_MAX,
+		.policy		= ip_set_setname_policy,
+	},
+	[IPSET_CMD_SAVE]	= {
+		.call		= ip_set_dump,
+		.attr_count	= IPSET_ATTR_CMD_MAX,
+		.policy		= ip_set_setname_policy,
+	},
+	[IPSET_CMD_ADD]	= {
+		.call		= ip_set_uadd,
+		.attr_count	= IPSET_ATTR_CMD_MAX,
+		.policy		= ip_set_adt_policy,
+	},
+	[IPSET_CMD_DEL]	= {
+		.call		= ip_set_udel,
+		.attr_count	= IPSET_ATTR_CMD_MAX,
+		.policy		= ip_set_adt_policy,
+	},
+	[IPSET_CMD_TEST]	= {
+		.call		= ip_set_utest,
+		.attr_count	= IPSET_ATTR_CMD_MAX,
+		.policy		= ip_set_adt_policy,
+	},
+	[IPSET_CMD_HEADER]	= {
+		.call		= ip_set_header,
+		.attr_count	= IPSET_ATTR_CMD_MAX,
+		.policy		= ip_set_setname_policy,
+	},
+	[IPSET_CMD_TYPE]	= {
+		.call		= ip_set_type,
+		.attr_count	= IPSET_ATTR_CMD_MAX,
+		.policy		= ip_set_type_policy,
+	},
+	[IPSET_CMD_PROTOCOL]	= {
+		.call		= ip_set_protocol,
+		.attr_count	= IPSET_ATTR_CMD_MAX,
+		.policy		= ip_set_protocol_policy,
+	},
+};
+
+static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = {
+	.name		= "ip_set",
+	.subsys_id	= NFNL_SUBSYS_IPSET,
+	.cb_count	= IPSET_MSG_MAX,
+	.cb		= ip_set_netlink_subsys_cb,
+};
+
+/* Interface to iptables/ip6tables */
+
+static int
+ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
+{
+	unsigned *op;
+	void *data;
+	int copylen = *len, ret = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+	if (optval != SO_IP_SET)
+		return -EBADF;
+	if (*len < sizeof(unsigned))
+		return -EINVAL;
+
+	data = vmalloc(*len);
+	if (!data)
+		return -ENOMEM;
+	if (copy_from_user(data, user, *len) != 0) {
+		ret = -EFAULT;
+		goto done;
+	}
+	op = (unsigned *) data;
+
+	if (*op < IP_SET_OP_VERSION) {
+		/* Check the version at the beginning of operations */
+		struct ip_set_req_version *req_version = data;
+		if (req_version->version != IPSET_PROTOCOL) {
+			ret = -EPROTO;
+			goto done;
+		}
+	}
+
+	switch (*op) {
+	case IP_SET_OP_VERSION: {
+		struct ip_set_req_version *req_version = data;
+
+		if (*len != sizeof(struct ip_set_req_version)) {
+			ret = -EINVAL;
+			goto done;
+		}
+
+		req_version->version = IPSET_PROTOCOL;
+		ret = copy_to_user(user, req_version,
+				   sizeof(struct ip_set_req_version));
+		goto done;
+	}
+	case IP_SET_OP_GET_BYNAME: {
+		struct ip_set_req_get_set *req_get = data;
+
+		if (*len != sizeof(struct ip_set_req_get_set)) {
+			ret = -EINVAL;
+			goto done;
+		}
+		req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
+		nfnl_lock();
+		req_get->set.index = find_set_id(req_get->set.name);
+		nfnl_unlock();
+		goto copy;
+	}
+	case IP_SET_OP_GET_BYINDEX: {
+		struct ip_set_req_get_set *req_get = data;
+
+		if (*len != sizeof(struct ip_set_req_get_set) ||
+		    req_get->set.index >= ip_set_max) {
+			ret = -EINVAL;
+			goto done;
+		}
+		nfnl_lock();
+		strncpy(req_get->set.name,
+			ip_set_list[req_get->set.index]
+				? ip_set_list[req_get->set.index]->name : "",
+			IPSET_MAXNAMELEN);
+		nfnl_unlock();
+		goto copy;
+	}
+	default:
+		ret = -EBADMSG;
+		goto done;
+	}	/* end of switch(op) */
+
+copy:
+	ret = copy_to_user(user, data, copylen);
+
+done:
+	vfree(data);
+	if (ret > 0)
+		ret = 0;
+	return ret;
+}
+
+static struct nf_sockopt_ops so_set __read_mostly = {
+	.pf		= PF_INET,
+	.get_optmin	= SO_IP_SET,
+	.get_optmax	= SO_IP_SET + 1,
+	.get		= &ip_set_sockfn_get,
+	.owner		= THIS_MODULE,
+};
+
+static int __init
+ip_set_init(void)
+{
+	int ret;
+
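+	/* Honour the max_sets override, capped just below IPSET_INVALID_ID. */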
+	if (max_sets)
+		ip_set_max = max_sets;
+	if (ip_set_max >= IPSET_INVALID_ID)
+		ip_set_max = IPSET_INVALID_ID - 1;
+
+	ip_set_list = kzalloc(sizeof(struct ip_set *) * ip_set_max,
+			      GFP_KERNEL);
+	if (!ip_set_list) {
+		pr_err("ip_set: Unable to create ip_set_list\n");
+		return -ENOMEM;
+	}
+
+	ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+	if (ret != 0) {
+		pr_err("ip_set: cannot register with nfnetlink.\n");
+		kfree(ip_set_list);
+		return ret;
+	}
+	ret = nf_register_sockopt(&so_set);
+	if (ret != 0) {
+		pr_err("SO_SET registration failed: %d\n", ret);
+		nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+		kfree(ip_set_list);
+		return ret;
+	}
+
+	pr_notice("ip_set: protocol %u\n", IPSET_PROTOCOL);
+	return 0;
+}
+
+static void __exit
+ip_set_fini(void)
+{
+	/* There can't be any existing set */
+	nf_unregister_sockopt(&so_set);
+	nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+	kfree(ip_set_list);
+	pr_debug("these are the famous last words\n");
+}
+
+module_init(ip_set_init);
+module_exit(ip_set_fini);
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
new file mode 100644
index 0000000..8d52272
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -0,0 +1,141 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Get Layer-4 data from the packets */
+
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include <linux/netfilter/ipset/ip_set_getport.h>
+
+/* We must handle non-linear skbs */
+static bool
+get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
+	 bool src, __be16 *port, u8 *proto)
+{
+	switch (protocol) {
+	case IPPROTO_TCP: {
+		struct tcphdr _tcph;
+		const struct tcphdr *th;
+
+		th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph);
+		if (th == NULL)
+			/* Header unavailable (truncated packet): give up */
+			return false;
+
+		*port = src ? th->source : th->dest;
+		break;
+	}
+	case IPPROTO_UDP: {
+		struct udphdr _udph;
+		const struct udphdr *uh;
+
+		uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph);
+		if (uh == NULL)
+			/* Header unavailable (truncated packet): give up */
+			return false;
+
+		*port = src ? uh->source : uh->dest;
+		break;
+	}
+	case IPPROTO_ICMP: {
+		struct icmphdr _ich;
+		const struct icmphdr *ic;
+
+		ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
+		if (ic == NULL)
+			return false;
+
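+		/* ICMP has no ports: pack type and code into the port slot. */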
+		*port = (__force __be16)htons((ic->type << 8) | ic->code);
+		break;
+	}
+	case IPPROTO_ICMPV6: {
+		struct icmp6hdr _ich;
+		const struct icmp6hdr *ic;
+
+		ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
+		if (ic == NULL)
+			return false;
+
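+		/* Same packing for the ICMPv6 type and code. */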
+		*port = (__force __be16)
+			htons((ic->icmp6_type << 8) | ic->icmp6_code);
+		break;
+	}
+	default:
+		break;
+	}
+	*proto = protocol;
+
+	return true;
+}
+
+bool
+ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
+		    __be16 *port, u8 *proto)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+	unsigned int protooff = ip_hdrlen(skb);
+	int protocol = iph->protocol;
+
+	/* See comments at tcp_match in ip_tables.c */
+	if (protocol <= 0 || (ntohs(iph->frag_off) & IP_OFFSET))
+		return false;
+
+	return get_port(skb, protocol, protooff, src, port, proto);
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+bool
+ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
+		    __be16 *port, u8 *proto)
+{
+	int protoff;
+	u8 nexthdr;
+
+	nexthdr = ipv6_hdr(skb)->nexthdr;
+	protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
+	if (protoff < 0)
+		return false;
+
+	return get_port(skb, nexthdr, protoff, src, port, proto);
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip6_port);
+#endif
+
+bool
+ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port)
+{
+	bool ret;
+	u8 proto;
+
+	switch (pf) {
+	case AF_INET:
+		ret = ip_set_get_ip4_port(skb, src, port, &proto);
+		break;
+	case AF_INET6:
+		ret = ip_set_get_ip6_port(skb, src, port, &proto);
+		break;
+	default:
+		return false;
+	}
+	if (!ret)
+		return ret;
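+	/* Only TCP and UDP carry port numbers usable for matching. */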
+	switch (proto) {
+	case IPPROTO_TCP:
+	case IPPROTO_UDP:
+		return true;
+	default:
+		return false;
+	}
+}
+EXPORT_SYMBOL_GPL(ip_set_get_ip_port);
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
new file mode 100644
index 0000000..43bcce2
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -0,0 +1,464 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip");
+
+/* Type specific function prefix */
+#define TYPE		hash_ip
+
+static bool
+hash_ip_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ip4_same_set	hash_ip_same_set
+#define hash_ip6_same_set	hash_ip_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ip4_elem {
+	__be32 ip;
+};
+
+/* Member elements with timeout support */
+struct hash_ip4_telem {
+	__be32 ip;
+	unsigned long timeout;
+};
+
+static inline bool
+hash_ip4_data_equal(const struct hash_ip4_elem *ip1,
+		    const struct hash_ip4_elem *ip2)
+{
+	return ip1->ip == ip2->ip;
+}
+
+static inline bool
+hash_ip4_data_isnull(const struct hash_ip4_elem *elem)
+{
+	return elem->ip == 0;
+}
+
+static inline void
+hash_ip4_data_copy(struct hash_ip4_elem *dst, const struct hash_ip4_elem *src)
+{
+	dst->ip = src->ip;
+}
+
+/* Zero valued IP addresses cannot be stored */
+static inline void
+hash_ip4_data_zero_out(struct hash_ip4_elem *elem)
+{
+	elem->ip = 0;
+}
+
+static inline bool
+hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data)
+{
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static bool
+hash_ip4_data_tlist(struct sk_buff *skb, const struct hash_ip4_elem *data)
+{
+	const struct hash_ip4_telem *tdata =
+		(const struct hash_ip4_telem *)data;
+
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+		      htonl(ip_set_timeout_get(tdata->timeout)));
+
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+#define IP_SET_HASH_WITH_NETMASK
+#define PF		4
+#define HOST_MASK	32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb,
+	      enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	__be32 ip;
+
+	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip);
+	ip &= ip_set_netmask(h->netmask);
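+	/* The all-zero address marks an empty hash slot, so it cannot be stored. */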
+	if (ip == 0)
+		return -EINVAL;
+
+	return adtfn(set, &ip, h->timeout);
+}
+
+static int
+hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
+	      enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	u32 ip, ip_to, hosts, timeout = h->timeout;
+	__be32 nip;
+	int ret = 0;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+	if (ret)
+		return ret;
+
+	ip &= ip_set_hostmask(h->netmask);
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(h->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	if (adt == IPSET_TEST) {
+		nip = htonl(ip);
+		if (nip == 0)
+			return -IPSET_ERR_HASH_ELEM;
+		return adtfn(set, &nip, timeout);
+	}
+
+	if (tb[IPSET_ATTR_IP_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+		if (ret)
+			return ret;
+		if (ip > ip_to)
+			swap(ip, ip_to);
+	} else if (tb[IPSET_ATTR_CIDR]) {
+		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+		if (cidr > 32)
+			return -IPSET_ERR_INVALID_CIDR;
+		ip &= ip_set_hostmask(cidr);
+		ip_to = ip | ~ip_set_hostmask(cidr);
+	} else
+		ip_to = ip;
+
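+	/* Step through the range one netmask-sized block of addresses at a time. */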
+	hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
+
+	for (; !before(ip_to, ip); ip += hosts) {
+		nip = htonl(ip);
+		if (nip == 0)
+			return -IPSET_ERR_HASH_ELEM;
+		ret = adtfn(set, &nip, timeout);
+
+		if (ret && !ip_set_eexist(ret, flags))
+			return ret;
+		else
+			ret = 0;
+	}
+	return ret;
+}
+
+static bool
+hash_ip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+	const struct ip_set_hash *x = a->data;
+	const struct ip_set_hash *y = b->data;
+
+	/* Resizing changes htable_bits, so we ignore it */
+	return x->maxelem == y->maxelem &&
+	       x->timeout == y->timeout &&
+	       x->netmask == y->netmask;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ip6_elem {
+	union nf_inet_addr ip;
+};
+
+struct hash_ip6_telem {
+	union nf_inet_addr ip;
+	unsigned long timeout;
+};
+
+static inline bool
+hash_ip6_data_equal(const struct hash_ip6_elem *ip1,
+		    const struct hash_ip6_elem *ip2)
+{
+	return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0;
+}
+
+static inline bool
+hash_ip6_data_isnull(const struct hash_ip6_elem *elem)
+{
+	return ipv6_addr_any(&elem->ip.in6);
+}
+
+static inline void
+hash_ip6_data_copy(struct hash_ip6_elem *dst, const struct hash_ip6_elem *src)
+{
+	ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+}
+
+static inline void
+hash_ip6_data_zero_out(struct hash_ip6_elem *elem)
+{
+	ipv6_addr_set(&elem->ip.in6, 0, 0, 0, 0);
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+	ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+	ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+	ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+	ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static bool
+hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data)
+{
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static bool
+hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data)
+{
+	const struct hash_ip6_telem *e =
+		(const struct hash_ip6_telem *)data;
+
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+		      htonl(ip_set_timeout_get(e->timeout)));
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF		6
+#define HOST_MASK	128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb,
+	      enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	union nf_inet_addr ip;
+
+	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip.in6);
+	ip6_netmask(&ip, h->netmask);
+	if (ipv6_addr_any(&ip.in6))
+		return -EINVAL;
+
+	return adtfn(set, &ip, h->timeout);
+}
+
+static const struct nla_policy hash_ip6_adt_policy[IPSET_ATTR_ADT_MAX + 1] = {
+	[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+	[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+	[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
+};
+
+static int
+hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
+	      enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	union nf_inet_addr ip;
+	u32 timeout = h->timeout;
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+		     tb[IPSET_ATTR_IP_TO] ||
+		     tb[IPSET_ATTR_CIDR]))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &ip);
+	if (ret)
+		return ret;
+
+	ip6_netmask(&ip, h->netmask);
+	if (ipv6_addr_any(&ip.in6))
+		return -IPSET_ERR_HASH_ELEM;
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(h->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	ret = adtfn(set, &ip, timeout);
+
+	return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+/* Create hash:ip type of sets */
+
+static int
+hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+	u8 netmask, hbits;
+	struct ip_set_hash *h;
+
+	if (!(set->family == AF_INET || set->family == AF_INET6))
+		return -IPSET_ERR_INVALID_FAMILY;
+	netmask = set->family == AF_INET ? 32 : 128;
+	pr_debug("Create set %s with family %s\n",
+		 set->name, set->family == AF_INET ? "inet" : "inet6");
+
+	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_HASHSIZE]) {
+		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+		if (hashsize < IPSET_MIMINAL_HASHSIZE)
+			hashsize = IPSET_MIMINAL_HASHSIZE;
+	}
+
+	if (tb[IPSET_ATTR_MAXELEM])
+		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+	if (tb[IPSET_ATTR_NETMASK]) {
+		netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
+
+		if ((set->family == AF_INET && netmask > 32) ||
+		    (set->family == AF_INET6 && netmask > 128) ||
+		    netmask == 0)
+			return -IPSET_ERR_INVALID_NETMASK;
+	}
+
+	h = kzalloc(sizeof(*h), GFP_KERNEL);
+	if (!h)
+		return -ENOMEM;
+
+	h->maxelem = maxelem;
+	h->netmask = netmask;
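+	/* Random per-set seed for the hash function. */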
+	get_random_bytes(&h->initval, sizeof(h->initval));
+	h->timeout = IPSET_NO_TIMEOUT;
+
+	hbits = htable_bits(hashsize);
+	h->table = ip_set_alloc(
+			sizeof(struct htable)
+			+ jhash_size(hbits) * sizeof(struct hbucket));
+	if (!h->table) {
+		kfree(h);
+		return -ENOMEM;
+	}
+	h->table->htable_bits = hbits;
+
+	set->data = h;
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+		set->variant = set->family == AF_INET
+			? &hash_ip4_tvariant : &hash_ip6_tvariant;
+
+		if (set->family == AF_INET)
+			hash_ip4_gc_init(set);
+		else
+			hash_ip6_gc_init(set);
+	} else {
+		set->variant = set->family == AF_INET
+			? &hash_ip4_variant : &hash_ip6_variant;
+	}
+
+	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+		 set->name, jhash_size(h->table->htable_bits),
+		 h->table->htable_bits, h->maxelem, set->data, h->table);
+
+	return 0;
+}
+
+static struct ip_set_type hash_ip_type __read_mostly = {
+	.name		= "hash:ip",
+	.protocol	= IPSET_PROTOCOL,
+	.features	= IPSET_TYPE_IP,
+	.dimension	= IPSET_DIM_ONE,
+	.family		= AF_UNSPEC,
+	.revision	= 0,
+	.create		= hash_ip_create,
+	.create_policy	= {
+		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
+		[IPSET_ATTR_MAXELEM]	= { .type = NLA_U32 },
+		[IPSET_ATTR_PROBES]	= { .type = NLA_U8 },
+		[IPSET_ATTR_RESIZE]	= { .type = NLA_U8  },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+		[IPSET_ATTR_NETMASK]	= { .type = NLA_U8  },
+	},
+	.adt_policy	= {
+		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init
+hash_ip_init(void)
+{
+	return ip_set_type_register(&hash_ip_type);
+}
+
+static void __exit
+hash_ip_fini(void)
+{
+	ip_set_type_unregister(&hash_ip_type);
+}
+
+module_init(hash_ip_init);
+module_exit(hash_ip_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
new file mode 100644
index 0000000..adbe787
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -0,0 +1,544 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port");
+
+/* Type specific function prefix */
+#define TYPE		hash_ipport
+
+static bool
+hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipport4_same_set	hash_ipport_same_set
+#define hash_ipport6_same_set	hash_ipport_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipport4_elem {
+	__be32 ip;
+	__be16 port;
+	u8 proto;
+	u8 padding;
+};
+
+/* Member elements with timeout support */
+struct hash_ipport4_telem {
+	__be32 ip;
+	__be16 port;
+	u8 proto;
+	u8 padding;
+	unsigned long timeout;
+};
+
+static inline bool
+hash_ipport4_data_equal(const struct hash_ipport4_elem *ip1,
+			const struct hash_ipport4_elem *ip2)
+{
+	return ip1->ip == ip2->ip &&
+	       ip1->port == ip2->port &&
+	       ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipport4_data_isnull(const struct hash_ipport4_elem *elem)
+{
+	return elem->proto == 0;
+}
+
+static inline void
+hash_ipport4_data_copy(struct hash_ipport4_elem *dst,
+		       const struct hash_ipport4_elem *src)
+{
+	dst->ip = src->ip;
+	dst->port = src->port;
+	dst->proto = src->proto;
+}
+
+static inline void
+hash_ipport4_data_zero_out(struct hash_ipport4_elem *elem)
+{
+	elem->proto = 0;
+}
+
+static bool
+hash_ipport4_data_list(struct sk_buff *skb,
+		       const struct hash_ipport4_elem *data)
+{
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static bool
+hash_ipport4_data_tlist(struct sk_buff *skb,
+			const struct hash_ipport4_elem *data)
+{
+	const struct hash_ipport4_telem *tdata =
+		(const struct hash_ipport4_telem *)data;
+
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+		      htonl(ip_set_timeout_get(tdata->timeout)));
+
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+#define PF		4
+#define HOST_MASK	32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb,
+		  enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_ipport4_elem data = { };
+
+	if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+				 &data.port, &data.proto))
+		return -EINVAL;
+
+	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+
+	return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
+		  enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_ipport4_elem data = { };
+	u32 ip, ip_to, p, port, port_to;
+	u32 timeout = h->timeout;
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_PORT])
+		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+	else
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_PROTO]) {
+		data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+		if (data.proto == 0)
+			return -IPSET_ERR_INVALID_PROTO;
+	} else
+		return -IPSET_ERR_MISSING_PROTO;
+
+	switch (data.proto) {
+	case IPPROTO_UDP:
+	case IPPROTO_TCP:
+	case IPPROTO_ICMP:
+		break;
+	default:
+		data.port = 0;
+		break;
+	}
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(h->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
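+	/* Add a single element when testing, when the protocol has no
+	 * real ports, or when no address/port range was given. */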
+	if (adt == IPSET_TEST ||
+	    !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+	      tb[IPSET_ATTR_PORT_TO])) {
+		ret = adtfn(set, &data, timeout);
+		return ip_set_eexist(ret, flags) ? 0 : ret;
+	}
+
+	ip = ntohl(data.ip);
+	if (tb[IPSET_ATTR_IP_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+		if (ret)
+			return ret;
+		if (ip > ip_to)
+			swap(ip, ip_to);
+	} else if (tb[IPSET_ATTR_CIDR]) {
+		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+		if (cidr > 32)
+			return -IPSET_ERR_INVALID_CIDR;
+		ip &= ip_set_hostmask(cidr);
+		ip_to = ip | ~ip_set_hostmask(cidr);
+	} else
+		ip_to = ip;
+
+	port = ntohs(data.port);
+	if (tb[IPSET_ATTR_PORT_TO]) {
+		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+		if (port > port_to)
+			swap(port, port_to);
+	} else
+		port_to = port;
+
+	for (; !before(ip_to, ip); ip++)
+		for (p = port; p <= port_to; p++) {
+			data.ip = htonl(ip);
+			data.port = htons(p);
+			ret = adtfn(set, &data, timeout);
+
+			if (ret && !ip_set_eexist(ret, flags))
+				return ret;
+			else
+				ret = 0;
+		}
+	return ret;
+}
+
+static bool
+hash_ipport_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+	const struct ip_set_hash *x = a->data;
+	const struct ip_set_hash *y = b->data;
+
+	/* Resizing changes htable_bits, so we ignore it */
+	return x->maxelem == y->maxelem &&
+	       x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipport6_elem {
+	union nf_inet_addr ip;
+	__be16 port;
+	u8 proto;
+	u8 padding;
+};
+
+struct hash_ipport6_telem {
+	union nf_inet_addr ip;
+	__be16 port;
+	u8 proto;
+	u8 padding;
+	unsigned long timeout;
+};
+
+static inline bool
+hash_ipport6_data_equal(const struct hash_ipport6_elem *ip1,
+			const struct hash_ipport6_elem *ip2)
+{
+	return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+	       ip1->port == ip2->port &&
+	       ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipport6_data_isnull(const struct hash_ipport6_elem *elem)
+{
+	return elem->proto == 0;
+}
+
+static inline void
+hash_ipport6_data_copy(struct hash_ipport6_elem *dst,
+		       const struct hash_ipport6_elem *src)
+{
+	memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipport6_data_zero_out(struct hash_ipport6_elem *elem)
+{
+	elem->proto = 0;
+}
+
+static bool
+hash_ipport6_data_list(struct sk_buff *skb,
+		       const struct hash_ipport6_elem *data)
+{
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static bool
+hash_ipport6_data_tlist(struct sk_buff *skb,
+			const struct hash_ipport6_elem *data)
+{
+	const struct hash_ipport6_telem *e =
+		(const struct hash_ipport6_telem *)data;
+
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+		      htonl(ip_set_timeout_get(e->timeout)));
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF		6
+#define HOST_MASK	128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb,
+		  enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_ipport6_elem data = { };
+
+	if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+				 &data.port, &data.proto))
+		return -EINVAL;
+
+	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+
+	return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
+		  enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_ipport6_elem data = { };
+	u32 port, port_to;
+	u32 timeout = h->timeout;
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+		     tb[IPSET_ATTR_IP_TO] ||
+		     tb[IPSET_ATTR_CIDR]))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_PORT])
+		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+	else
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_PROTO]) {
+		data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+		if (data.proto == 0)
+			return -IPSET_ERR_INVALID_PROTO;
+	} else
+		return -IPSET_ERR_MISSING_PROTO;
+
+	switch (data.proto) {
+	case IPPROTO_UDP:
+	case IPPROTO_TCP:
+	case IPPROTO_ICMPV6:
+		break;
+	default:
+		data.port = 0;
+		break;
+	}
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(h->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	if (adt == IPSET_TEST ||
+	    !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+	    !tb[IPSET_ATTR_PORT_TO]) {
+		ret = adtfn(set, &data, timeout);
+		return ip_set_eexist(ret, flags) ? 0 : ret;
+	}
+
+	port = ntohs(data.port);
+	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+	if (port > port_to)
+		swap(port, port_to);
+
+	for (; port <= port_to; port++) {
+		data.port = htons(port);
+		ret = adtfn(set, &data, timeout);
+
+		if (ret && !ip_set_eexist(ret, flags))
+			return ret;
+		else
+			ret = 0;
+	}
+	return ret;
+}
+
+/* Create hash:ip,port type of sets */
+
+static int
+hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+	struct ip_set_hash *h;
+	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+	u8 hbits;
+
+	if (!(set->family == AF_INET || set->family == AF_INET6))
+		return -IPSET_ERR_INVALID_FAMILY;
+
+	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_HASHSIZE]) {
+		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+		if (hashsize < IPSET_MIMINAL_HASHSIZE)
+			hashsize = IPSET_MIMINAL_HASHSIZE;
+	}
+
+	if (tb[IPSET_ATTR_MAXELEM])
+		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+	h = kzalloc(sizeof(*h), GFP_KERNEL);
+	if (!h)
+		return -ENOMEM;
+
+	h->maxelem = maxelem;
+	get_random_bytes(&h->initval, sizeof(h->initval));
+	h->timeout = IPSET_NO_TIMEOUT;
+
+	hbits = htable_bits(hashsize);
+	h->table = ip_set_alloc(
+			sizeof(struct htable)
+			+ jhash_size(hbits) * sizeof(struct hbucket));
+	if (!h->table) {
+		kfree(h);
+		return -ENOMEM;
+	}
+	h->table->htable_bits = hbits;
+
+	set->data = h;
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+		set->variant = set->family == AF_INET
+			? &hash_ipport4_tvariant : &hash_ipport6_tvariant;
+
+		if (set->family == AF_INET)
+			hash_ipport4_gc_init(set);
+		else
+			hash_ipport6_gc_init(set);
+	} else {
+		set->variant = set->family == AF_INET
+			? &hash_ipport4_variant : &hash_ipport6_variant;
+	}
+
+	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+		 set->name, jhash_size(h->table->htable_bits),
+		 h->table->htable_bits, h->maxelem, set->data, h->table);
+
+	return 0;
+}
+
+static struct ip_set_type hash_ipport_type __read_mostly = {
+	.name		= "hash:ip,port",
+	.protocol	= IPSET_PROTOCOL,
+	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT,
+	.dimension	= IPSET_DIM_TWO,
+	.family		= AF_UNSPEC,
+	.revision	= 0,
+	.create		= hash_ipport_create,
+	.create_policy	= {
+		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
+		[IPSET_ATTR_MAXELEM]	= { .type = NLA_U32 },
+		[IPSET_ATTR_PROBES]	= { .type = NLA_U8 },
+		[IPSET_ATTR_RESIZE]	= { .type = NLA_U8  },
+		[IPSET_ATTR_PROTO]	= { .type = NLA_U8 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+	},
+	.adt_policy	= {
+		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
+		[IPSET_ATTR_PORT_TO]	= { .type = NLA_U16 },
+		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
+		[IPSET_ATTR_PROTO]	= { .type = NLA_U8 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init
+hash_ipport_init(void)
+{
+	return ip_set_type_register(&hash_ipport_type);
+}
+
+static void __exit
+hash_ipport_fini(void)
+{
+	ip_set_type_unregister(&hash_ipport_type);
+}
+
+module_init(hash_ipport_init);
+module_exit(hash_ipport_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
new file mode 100644
index 0000000..22e23ab
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -0,0 +1,562 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port,ip type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port,ip type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port,ip");
+
+/* Type specific function prefix */
+#define TYPE		hash_ipportip
+
+static bool
+hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipportip4_same_set	hash_ipportip_same_set
+#define hash_ipportip6_same_set	hash_ipportip_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipportip4_elem {
+	__be32 ip;
+	__be32 ip2;
+	__be16 port;
+	u8 proto;
+	u8 padding;
+};
+
+/* Member elements with timeout support */
+struct hash_ipportip4_telem {
+	__be32 ip;
+	__be32 ip2;
+	__be16 port;
+	u8 proto;
+	u8 padding;
+	unsigned long timeout;
+};
+
+static inline bool
+hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1,
+			  const struct hash_ipportip4_elem *ip2)
+{
+	return ip1->ip == ip2->ip &&
+	       ip1->ip2 == ip2->ip2 &&
+	       ip1->port == ip2->port &&
+	       ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportip4_data_isnull(const struct hash_ipportip4_elem *elem)
+{
+	return elem->proto == 0;
+}
+
+static inline void
+hash_ipportip4_data_copy(struct hash_ipportip4_elem *dst,
+			 const struct hash_ipportip4_elem *src)
+{
+	memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportip4_data_zero_out(struct hash_ipportip4_elem *elem)
+{
+	elem->proto = 0;
+}
+
+static bool
+hash_ipportip4_data_list(struct sk_buff *skb,
+		       const struct hash_ipportip4_elem *data)
+{
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static bool
+hash_ipportip4_data_tlist(struct sk_buff *skb,
+			const struct hash_ipportip4_elem *data)
+{
+	const struct hash_ipportip4_telem *tdata =
+		(const struct hash_ipportip4_telem *)data;
+
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+		      htonl(ip_set_timeout_get(tdata->timeout)));
+
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+#define PF		4
+#define HOST_MASK	32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb,
+		    enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_ipportip4_elem data = { };
+
+	if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+				 &data.port, &data.proto))
+		return -EINVAL;
+
+	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+	ip4addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2);
+
+	return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
+		    enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_ipportip4_elem data = { };
+	u32 ip, ip_to, p, port, port_to;
+	u32 timeout = h->timeout;
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_PORT])
+		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+	else
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_PROTO]) {
+		data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+		if (data.proto == 0)
+			return -IPSET_ERR_INVALID_PROTO;
+	} else
+		return -IPSET_ERR_MISSING_PROTO;
+
+	switch (data.proto) {
+	case IPPROTO_UDP:
+	case IPPROTO_TCP:
+	case IPPROTO_ICMP:
+		break;
+	default:
+		data.port = 0;
+		break;
+	}
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(h->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	if (adt == IPSET_TEST ||
+	    !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+	      tb[IPSET_ATTR_PORT_TO])) {
+		ret = adtfn(set, &data, timeout);
+		return ip_set_eexist(ret, flags) ? 0 : ret;
+	}
+
+	ip = ntohl(data.ip);
+	if (tb[IPSET_ATTR_IP_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+		if (ret)
+			return ret;
+		if (ip > ip_to)
+			swap(ip, ip_to);
+	} else if (tb[IPSET_ATTR_CIDR]) {
+		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+		if (cidr > 32)
+			return -IPSET_ERR_INVALID_CIDR;
+		ip &= ip_set_hostmask(cidr);
+		ip_to = ip | ~ip_set_hostmask(cidr);
+	} else
+		ip_to = ip;
+
+	port = ntohs(data.port);
+	if (tb[IPSET_ATTR_PORT_TO]) {
+		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+		if (port > port_to)
+			swap(port, port_to);
+	} else
+		port_to = port;
+
+	for (; !before(ip_to, ip); ip++)
+		for (p = port; p <= port_to; p++) {
+			data.ip = htonl(ip);
+			data.port = htons(p);
+			ret = adtfn(set, &data, timeout);
+
+			if (ret && !ip_set_eexist(ret, flags))
+				return ret;
+			else
+				ret = 0;
+		}
+	return ret;
+}
+
+static bool
+hash_ipportip_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+	const struct ip_set_hash *x = a->data;
+	const struct ip_set_hash *y = b->data;
+
+	/* Resizing changes htable_bits, so we ignore it */
+	return x->maxelem == y->maxelem &&
+	       x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipportip6_elem {
+	union nf_inet_addr ip;
+	union nf_inet_addr ip2;
+	__be16 port;
+	u8 proto;
+	u8 padding;
+};
+
+struct hash_ipportip6_telem {
+	union nf_inet_addr ip;
+	union nf_inet_addr ip2;
+	__be16 port;
+	u8 proto;
+	u8 padding;
+	unsigned long timeout;
+};
+
+static inline bool
+hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1,
+			  const struct hash_ipportip6_elem *ip2)
+{
+	return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+	       ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 &&
+	       ip1->port == ip2->port &&
+	       ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportip6_data_isnull(const struct hash_ipportip6_elem *elem)
+{
+	return elem->proto == 0;
+}
+
+static inline void
+hash_ipportip6_data_copy(struct hash_ipportip6_elem *dst,
+			 const struct hash_ipportip6_elem *src)
+{
+	memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportip6_data_zero_out(struct hash_ipportip6_elem *elem)
+{
+	elem->proto = 0;
+}
+
+static bool
+hash_ipportip6_data_list(struct sk_buff *skb,
+			 const struct hash_ipportip6_elem *data)
+{
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static bool
+hash_ipportip6_data_tlist(struct sk_buff *skb,
+			  const struct hash_ipportip6_elem *data)
+{
+	const struct hash_ipportip6_telem *e =
+		(const struct hash_ipportip6_telem *)data;
+
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+		      htonl(ip_set_timeout_get(e->timeout)));
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF		6
+#define HOST_MASK	128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb,
+		    enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_ipportip6_elem data = { };
+
+	if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+				 &data.port, &data.proto))
+		return -EINVAL;
+
+	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+	ip6addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
+
+	return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
+		    enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_ipportip6_elem data = { };
+	u32 port, port_to;
+	u32 timeout = h->timeout;
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+		     tb[IPSET_ATTR_IP_TO] ||
+		     tb[IPSET_ATTR_CIDR]))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_PORT])
+		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+	else
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_PROTO]) {
+		data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+		if (data.proto == 0)
+			return -IPSET_ERR_INVALID_PROTO;
+	} else
+		return -IPSET_ERR_MISSING_PROTO;
+
+	switch (data.proto) {
+	case IPPROTO_UDP:
+	case IPPROTO_TCP:
+	case IPPROTO_ICMPV6:
+		break;
+	default:
+		data.port = 0;
+		break;
+	}
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(h->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	if (adt == IPSET_TEST ||
+	    !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+	    !tb[IPSET_ATTR_PORT_TO]) {
+		ret = adtfn(set, &data, timeout);
+		return ip_set_eexist(ret, flags) ? 0 : ret;
+	}
+
+	port = ntohs(data.port);
+	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+	if (port > port_to)
+		swap(port, port_to);
+
+	for (; port <= port_to; port++) {
+		data.port = htons(port);
+		ret = adtfn(set, &data, timeout);
+
+		if (ret && !ip_set_eexist(ret, flags))
+			return ret;
+		else
+			ret = 0;
+	}
+	return ret;
+}
+
+/* Create hash:ip,port,ip type of sets */
+
+static int
+hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+	struct ip_set_hash *h;
+	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+	u8 hbits;
+
+	if (!(set->family == AF_INET || set->family == AF_INET6))
+		return -IPSET_ERR_INVALID_FAMILY;
+
+	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_HASHSIZE]) {
+		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+		if (hashsize < IPSET_MIMINAL_HASHSIZE)
+			hashsize = IPSET_MIMINAL_HASHSIZE;
+	}
+
+	if (tb[IPSET_ATTR_MAXELEM])
+		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+	h = kzalloc(sizeof(*h), GFP_KERNEL);
+	if (!h)
+		return -ENOMEM;
+
+	h->maxelem = maxelem;
+	get_random_bytes(&h->initval, sizeof(h->initval));
+	h->timeout = IPSET_NO_TIMEOUT;
+
+	hbits = htable_bits(hashsize);
+	h->table = ip_set_alloc(
+			sizeof(struct htable)
+			+ jhash_size(hbits) * sizeof(struct hbucket));
+	if (!h->table) {
+		kfree(h);
+		return -ENOMEM;
+	}
+	h->table->htable_bits = hbits;
+
+	set->data = h;
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+		set->variant = set->family == AF_INET
+			? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant;
+
+		if (set->family == AF_INET)
+			hash_ipportip4_gc_init(set);
+		else
+			hash_ipportip6_gc_init(set);
+	} else {
+		set->variant = set->family == AF_INET
+			? &hash_ipportip4_variant : &hash_ipportip6_variant;
+	}
+
+	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+		 set->name, jhash_size(h->table->htable_bits),
+		 h->table->htable_bits, h->maxelem, set->data, h->table);
+
+	return 0;
+}
+
+static struct ip_set_type hash_ipportip_type __read_mostly = {
+	.name		= "hash:ip,port,ip",
+	.protocol	= IPSET_PROTOCOL,
+	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
+	.dimension	= IPSET_DIM_THREE,
+	.family		= AF_UNSPEC,
+	.revision	= 0,
+	.create		= hash_ipportip_create,
+	.create_policy	= {
+		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
+		[IPSET_ATTR_MAXELEM]	= { .type = NLA_U32 },
+		[IPSET_ATTR_PROBES]	= { .type = NLA_U8 },
+		[IPSET_ATTR_RESIZE]	= { .type = NLA_U8  },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+	},
+	.adt_policy	= {
+		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP2]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
+		[IPSET_ATTR_PORT_TO]	= { .type = NLA_U16 },
+		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
+		[IPSET_ATTR_PROTO]	= { .type = NLA_U8 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init
+hash_ipportip_init(void)
+{
+	return ip_set_type_register(&hash_ipportip_type);
+}
+
+static void __exit
+hash_ipportip_fini(void)
+{
+	ip_set_type_unregister(&hash_ipportip_type);
+}
+
+module_init(hash_ipportip_init);
+module_exit(hash_ipportip_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
new file mode 100644
index 0000000..6033e8b
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -0,0 +1,628 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:ip,port,net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:ip,port,net type of IP sets");
+MODULE_ALIAS("ip_set_hash:ip,port,net");
+
+/* Type specific function prefix */
+#define TYPE		hash_ipportnet
+
+static bool
+hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_ipportnet4_same_set	hash_ipportnet_same_set
+#define hash_ipportnet6_same_set	hash_ipportnet_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_ipportnet4_elem {
+	__be32 ip;
+	__be32 ip2;
+	__be16 port;
+	u8 cidr;
+	u8 proto;
+};
+
+/* Member elements with timeout support */
+struct hash_ipportnet4_telem {
+	__be32 ip;
+	__be32 ip2;
+	__be16 port;
+	u8 cidr;
+	u8 proto;
+	unsigned long timeout;
+};
+
+static inline bool
+hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1,
+			   const struct hash_ipportnet4_elem *ip2)
+{
+	return ip1->ip == ip2->ip &&
+	       ip1->ip2 == ip2->ip2 &&
+	       ip1->cidr == ip2->cidr &&
+	       ip1->port == ip2->port &&
+	       ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportnet4_data_isnull(const struct hash_ipportnet4_elem *elem)
+{
+	return elem->proto == 0;
+}
+
+static inline void
+hash_ipportnet4_data_copy(struct hash_ipportnet4_elem *dst,
+			  const struct hash_ipportnet4_elem *src)
+{
+	memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr)
+{
+	elem->ip2 &= ip_set_netmask(cidr);
+	elem->cidr = cidr;
+}
+
+static inline void
+hash_ipportnet4_data_zero_out(struct hash_ipportnet4_elem *elem)
+{
+	elem->proto = 0;
+}
+
+static bool
+hash_ipportnet4_data_list(struct sk_buff *skb,
+			  const struct hash_ipportnet4_elem *data)
+{
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static bool
+hash_ipportnet4_data_tlist(struct sk_buff *skb,
+			   const struct hash_ipportnet4_elem *data)
+{
+	const struct hash_ipportnet4_telem *tdata =
+		(const struct hash_ipportnet4_telem *)data;
+
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+		      htonl(ip_set_timeout_get(tdata->timeout)));
+
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+#define IP_SET_HASH_WITH_PROTO
+#define IP_SET_HASH_WITH_NETS
+
+#define PF		4
+#define HOST_MASK	32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+		     enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_ipportnet4_elem data =
+		{ .cidr = h->nets[0].cidr ?: HOST_MASK };
+
+	if (data.cidr == 0)
+		return -EINVAL;
+	if (adt == IPSET_TEST)
+		data.cidr = HOST_MASK;
+
+	if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+				 &data.port, &data.proto))
+		return -EINVAL;
+
+	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+	ip4addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2);
+	data.ip2 &= ip_set_netmask(data.cidr);
+
+	return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+		     enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
+	u32 ip, ip_to, p, port, port_to;
+	u32 timeout = h->timeout;
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR2])
+		data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+	if (!data.cidr)
+		return -IPSET_ERR_INVALID_CIDR;
+
+	data.ip2 &= ip_set_netmask(data.cidr);
+
+	if (tb[IPSET_ATTR_PORT])
+		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+	else
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_PROTO]) {
+		data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+		if (data.proto == 0)
+			return -IPSET_ERR_INVALID_PROTO;
+	} else
+		return -IPSET_ERR_MISSING_PROTO;
+
+	switch (data.proto) {
+	case IPPROTO_UDP:
+	case IPPROTO_TCP:
+	case IPPROTO_ICMP:
+		break;
+	default:
+		data.port = 0;
+		break;
+	}
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(h->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	if (adt == IPSET_TEST ||
+	    !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
+	      tb[IPSET_ATTR_PORT_TO])) {
+		ret = adtfn(set, &data, timeout);
+		return ip_set_eexist(ret, flags) ? 0 : ret;
+	}
+
+	ip = ntohl(data.ip);
+	if (tb[IPSET_ATTR_IP_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+		if (ret)
+			return ret;
+		if (ip > ip_to)
+			swap(ip, ip_to);
+	} else if (tb[IPSET_ATTR_CIDR]) {
+		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+		if (cidr > 32)
+			return -IPSET_ERR_INVALID_CIDR;
+		ip &= ip_set_hostmask(cidr);
+		ip_to = ip | ~ip_set_hostmask(cidr);
+	} else
+		ip_to = ip;
+
+	port = ntohs(data.port);
+	if (tb[IPSET_ATTR_PORT_TO]) {
+		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+		if (port > port_to)
+			swap(port, port_to);
+	} else
+		port_to = port;
+
+	for (; !before(ip_to, ip); ip++)
+		for (p = port; p <= port_to; p++) {
+			data.ip = htonl(ip);
+			data.port = htons(p);
+			ret = adtfn(set, &data, timeout);
+
+			if (ret && !ip_set_eexist(ret, flags))
+				return ret;
+			else
+				ret = 0;
+		}
+	return ret;
+}
+
+static bool
+hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+	const struct ip_set_hash *x = a->data;
+	const struct ip_set_hash *y = b->data;
+
+	/* Resizing changes htable_bits, so we ignore it */
+	return x->maxelem == y->maxelem &&
+	       x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_ipportnet6_elem {
+	union nf_inet_addr ip;
+	union nf_inet_addr ip2;
+	__be16 port;
+	u8 cidr;
+	u8 proto;
+};
+
+struct hash_ipportnet6_telem {
+	union nf_inet_addr ip;
+	union nf_inet_addr ip2;
+	__be16 port;
+	u8 cidr;
+	u8 proto;
+	unsigned long timeout;
+};
+
+static inline bool
+hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1,
+			   const struct hash_ipportnet6_elem *ip2)
+{
+	return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+	       ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 &&
+	       ip1->cidr == ip2->cidr &&
+	       ip1->port == ip2->port &&
+	       ip1->proto == ip2->proto;
+}
+
+static inline bool
+hash_ipportnet6_data_isnull(const struct hash_ipportnet6_elem *elem)
+{
+	return elem->proto == 0;
+}
+
+static inline void
+hash_ipportnet6_data_copy(struct hash_ipportnet6_elem *dst,
+			  const struct hash_ipportnet6_elem *src)
+{
+	memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem)
+{
+	elem->proto = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+	ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+	ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+	ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+	ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr)
+{
+	ip6_netmask(&elem->ip2, cidr);
+	elem->cidr = cidr;
+}
+
+static bool
+hash_ipportnet6_data_list(struct sk_buff *skb,
+			  const struct hash_ipportnet6_elem *data)
+{
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static bool
+hash_ipportnet6_data_tlist(struct sk_buff *skb,
+			   const struct hash_ipportnet6_elem *data)
+{
+	const struct hash_ipportnet6_telem *e =
+		(const struct hash_ipportnet6_telem *)data;
+
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+		      htonl(ip_set_timeout_get(e->timeout)));
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF		6
+#define HOST_MASK	128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
+		     enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_ipportnet6_elem data =
+		{ .cidr = h->nets[0].cidr ?: HOST_MASK };
+
+	if (data.cidr == 0)
+		return -EINVAL;
+	if (adt == IPSET_TEST)
+		data.cidr = HOST_MASK;
+
+	if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+				 &data.port, &data.proto))
+		return -EINVAL;
+
+	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+	ip6addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
+	ip6_netmask(&data.ip2, data.cidr);
+
+	return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+		     enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_ipportnet6_elem data = { .cidr = HOST_MASK };
+	u32 port, port_to;
+	u32 timeout = h->timeout;
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+		     tb[IPSET_ATTR_IP_TO] ||
+		     tb[IPSET_ATTR_CIDR]))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+	if (ret)
+		return ret;
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &data.ip2);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR2])
+		data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+	if (!data.cidr)
+		return -IPSET_ERR_INVALID_CIDR;
+
+	ip6_netmask(&data.ip2, data.cidr);
+
+	if (tb[IPSET_ATTR_PORT])
+		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+	else
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_PROTO]) {
+		data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+		if (data.proto == 0)
+			return -IPSET_ERR_INVALID_PROTO;
+	} else
+		return -IPSET_ERR_MISSING_PROTO;
+
+	switch (data.proto) {
+	case IPPROTO_UDP:
+	case IPPROTO_TCP:
+	case IPPROTO_ICMPV6:
+		break;
+	default:
+		data.port = 0;
+		break;
+	}
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(h->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	if (adt == IPSET_TEST ||
+	    !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+	    !tb[IPSET_ATTR_PORT_TO]) {
+		ret = adtfn(set, &data, timeout);
+		return ip_set_eexist(ret, flags) ? 0 : ret;
+	}
+
+	port = ntohs(data.port);
+	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+	if (port > port_to)
+		swap(port, port_to);
+
+	for (; port <= port_to; port++) {
+		data.port = htons(port);
+		ret = adtfn(set, &data, timeout);
+
+		if (ret && !ip_set_eexist(ret, flags))
+			return ret;
+		else
+			ret = 0;
+	}
+	return ret;
+}
+
+/* Create hash:ip,port,net type of sets */
+
+static int
+hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+	struct ip_set_hash *h;
+	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+	u8 hbits;
+
+	if (!(set->family == AF_INET || set->family == AF_INET6))
+		return -IPSET_ERR_INVALID_FAMILY;
+
+	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_HASHSIZE]) {
+		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+		if (hashsize < IPSET_MIMINAL_HASHSIZE)
+			hashsize = IPSET_MIMINAL_HASHSIZE;
+	}
+
+	if (tb[IPSET_ATTR_MAXELEM])
+		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+	h = kzalloc(sizeof(*h)
+		    + sizeof(struct ip_set_hash_nets)
+		      * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+	if (!h)
+		return -ENOMEM;
+
+	h->maxelem = maxelem;
+	get_random_bytes(&h->initval, sizeof(h->initval));
+	h->timeout = IPSET_NO_TIMEOUT;
+
+	hbits = htable_bits(hashsize);
+	h->table = ip_set_alloc(
+			sizeof(struct htable)
+			+ jhash_size(hbits) * sizeof(struct hbucket));
+	if (!h->table) {
+		kfree(h);
+		return -ENOMEM;
+	}
+	h->table->htable_bits = hbits;
+
+	set->data = h;
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+		set->variant = set->family == AF_INET
+			? &hash_ipportnet4_tvariant
+			: &hash_ipportnet6_tvariant;
+
+		if (set->family == AF_INET)
+			hash_ipportnet4_gc_init(set);
+		else
+			hash_ipportnet6_gc_init(set);
+	} else {
+		set->variant = set->family == AF_INET
+			? &hash_ipportnet4_variant : &hash_ipportnet6_variant;
+	}
+
+	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+		 set->name, jhash_size(h->table->htable_bits),
+		 h->table->htable_bits, h->maxelem, set->data, h->table);
+
+	return 0;
+}
+
+static struct ip_set_type hash_ipportnet_type __read_mostly = {
+	.name		= "hash:ip,port,net",
+	.protocol	= IPSET_PROTOCOL,
+	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
+	.dimension	= IPSET_DIM_THREE,
+	.family		= AF_UNSPEC,
+	.revision	= 0,
+	.create		= hash_ipportnet_create,
+	.create_policy	= {
+		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
+		[IPSET_ATTR_MAXELEM]	= { .type = NLA_U32 },
+		[IPSET_ATTR_PROBES]	= { .type = NLA_U8 },
+		[IPSET_ATTR_RESIZE]	= { .type = NLA_U8  },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+	},
+	.adt_policy	= {
+		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP2]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
+		[IPSET_ATTR_PORT_TO]	= { .type = NLA_U16 },
+		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
+		[IPSET_ATTR_CIDR2]	= { .type = NLA_U8 },
+		[IPSET_ATTR_PROTO]	= { .type = NLA_U8 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init
+hash_ipportnet_init(void)
+{
+	return ip_set_type_register(&hash_ipportnet_type);
+}
+
+static void __exit
+hash_ipportnet_fini(void)
+{
+	ip_set_type_unregister(&hash_ipportnet_type);
+}
+
+module_init(hash_ipportnet_init);
+module_exit(hash_ipportnet_fini);
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
new file mode 100644
index 0000000..c4db202
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -0,0 +1,458 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:net type of IP sets");
+MODULE_ALIAS("ip_set_hash:net");
+
+/* Type specific function prefix */
+#define TYPE		hash_net
+
+static bool
+hash_net_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_net4_same_set	hash_net_same_set
+#define hash_net6_same_set	hash_net_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_net4_elem {
+	__be32 ip;
+	u16 padding0;
+	u8 padding1;
+	u8 cidr;
+};
+
+/* Member elements with timeout support */
+struct hash_net4_telem {
+	__be32 ip;
+	u16 padding0;
+	u8 padding1;
+	u8 cidr;
+	unsigned long timeout;
+};
+
+static inline bool
+hash_net4_data_equal(const struct hash_net4_elem *ip1,
+		    const struct hash_net4_elem *ip2)
+{
+	return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_net4_data_isnull(const struct hash_net4_elem *elem)
+{
+	return elem->cidr == 0;
+}
+
+static inline void
+hash_net4_data_copy(struct hash_net4_elem *dst,
+		    const struct hash_net4_elem *src)
+{
+	dst->ip = src->ip;
+	dst->cidr = src->cidr;
+}
+
+static inline void
+hash_net4_data_netmask(struct hash_net4_elem *elem, u8 cidr)
+{
+	elem->ip &= ip_set_netmask(cidr);
+	elem->cidr = cidr;
+}
+
+/* Zero CIDR values cannot be stored */
+static inline void
+hash_net4_data_zero_out(struct hash_net4_elem *elem)
+{
+	elem->cidr = 0;
+}
+
+static bool
+hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
+{
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static bool
+hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data)
+{
+	const struct hash_net4_telem *tdata =
+		(const struct hash_net4_telem *)data;
+
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr);
+	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+		      htonl(ip_set_timeout_get(tdata->timeout)));
+
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+#define IP_SET_HASH_WITH_NETS
+
+#define PF		4
+#define HOST_MASK	32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
+	       enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_net4_elem data = { .cidr = h->nets[0].cidr ?: HOST_MASK };
+
+	if (data.cidr == 0)
+		return -EINVAL;
+	if (adt == IPSET_TEST)
+		data.cidr = HOST_MASK;
+
+	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+	data.ip &= ip_set_netmask(data.cidr);
+
+	return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
+	       enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_net4_elem data = { .cidr = HOST_MASK };
+	u32 timeout = h->timeout;
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR])
+		data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+	if (!data.cidr)
+		return -IPSET_ERR_INVALID_CIDR;
+
+	data.ip &= ip_set_netmask(data.cidr);
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(h->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	ret = adtfn(set, &data, timeout);
+
+	return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static bool
+hash_net_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+	const struct ip_set_hash *x = a->data;
+	const struct ip_set_hash *y = b->data;
+
+	/* Resizing changes htable_bits, so we ignore it */
+	return x->maxelem == y->maxelem &&
+	       x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_net6_elem {
+	union nf_inet_addr ip;
+	u16 padding0;
+	u8 padding1;
+	u8 cidr;
+};
+
+struct hash_net6_telem {
+	union nf_inet_addr ip;
+	u16 padding0;
+	u8 padding1;
+	u8 cidr;
+	unsigned long timeout;
+};
+
+static inline bool
+hash_net6_data_equal(const struct hash_net6_elem *ip1,
+		     const struct hash_net6_elem *ip2)
+{
+	return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+	       ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_net6_data_isnull(const struct hash_net6_elem *elem)
+{
+	return elem->cidr == 0;
+}
+
+static inline void
+hash_net6_data_copy(struct hash_net6_elem *dst,
+		    const struct hash_net6_elem *src)
+{
+	ipv6_addr_copy(&dst->ip.in6, &src->ip.in6);
+	dst->cidr = src->cidr;
+}
+
+static inline void
+hash_net6_data_zero_out(struct hash_net6_elem *elem)
+{
+	elem->cidr = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+	ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+	ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+	ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+	ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_net6_data_netmask(struct hash_net6_elem *elem, u8 cidr)
+{
+	ip6_netmask(&elem->ip, cidr);
+	elem->cidr = cidr;
+}
+
+static bool
+hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
+{
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static bool
+hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data)
+{
+	const struct hash_net6_telem *e =
+		(const struct hash_net6_telem *)data;
+
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
+	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+		      htonl(ip_set_timeout_get(e->timeout)));
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF		6
+#define HOST_MASK	128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
+	       enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_net6_elem data = { .cidr = h->nets[0].cidr ?: HOST_MASK };
+
+	if (data.cidr == 0)
+		return -EINVAL;
+	if (adt == IPSET_TEST)
+		data.cidr = HOST_MASK;
+
+	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+	ip6_netmask(&data.ip, data.cidr);
+
+	return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
+	       enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_net6_elem data = { .cidr = HOST_MASK };
+	u32 timeout = h->timeout;
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR])
+		data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+	if (!data.cidr)
+		return -IPSET_ERR_INVALID_CIDR;
+
+	ip6_netmask(&data.ip, data.cidr);
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(h->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	ret = adtfn(set, &data, timeout);
+
+	return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+/* Create hash:net type of sets */
+
+static int
+hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+	struct ip_set_hash *h;
+	u8 hbits;
+
+	if (!(set->family == AF_INET || set->family == AF_INET6))
+		return -IPSET_ERR_INVALID_FAMILY;
+
+	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_HASHSIZE]) {
+		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+		if (hashsize < IPSET_MIMINAL_HASHSIZE)
+			hashsize = IPSET_MIMINAL_HASHSIZE;
+	}
+
+	if (tb[IPSET_ATTR_MAXELEM])
+		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+	h = kzalloc(sizeof(*h)
+		    + sizeof(struct ip_set_hash_nets)
+		      * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+	if (!h)
+		return -ENOMEM;
+
+	h->maxelem = maxelem;
+	get_random_bytes(&h->initval, sizeof(h->initval));
+	h->timeout = IPSET_NO_TIMEOUT;
+
+	hbits = htable_bits(hashsize);
+	h->table = ip_set_alloc(
+			sizeof(struct htable)
+			+ jhash_size(hbits) * sizeof(struct hbucket));
+	if (!h->table) {
+		kfree(h);
+		return -ENOMEM;
+	}
+	h->table->htable_bits = hbits;
+
+	set->data = h;
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+		set->variant = set->family == AF_INET
+			? &hash_net4_tvariant : &hash_net6_tvariant;
+
+		if (set->family == AF_INET)
+			hash_net4_gc_init(set);
+		else
+			hash_net6_gc_init(set);
+	} else {
+		set->variant = set->family == AF_INET
+			? &hash_net4_variant : &hash_net6_variant;
+	}
+
+	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+		 set->name, jhash_size(h->table->htable_bits),
+		 h->table->htable_bits, h->maxelem, set->data, h->table);
+
+	return 0;
+}
+
+static struct ip_set_type hash_net_type __read_mostly = {
+	.name		= "hash:net",
+	.protocol	= IPSET_PROTOCOL,
+	.features	= IPSET_TYPE_IP,
+	.dimension	= IPSET_DIM_ONE,
+	.family		= AF_UNSPEC,
+	.revision	= 0,
+	.create		= hash_net_create,
+	.create_policy	= {
+		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
+		[IPSET_ATTR_MAXELEM]	= { .type = NLA_U32 },
+		[IPSET_ATTR_PROBES]	= { .type = NLA_U8 },
+		[IPSET_ATTR_RESIZE]	= { .type = NLA_U8  },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+	},
+	.adt_policy	= {
+		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init
+hash_net_init(void)
+{
+	return ip_set_type_register(&hash_net_type);
+}
+
+static void __exit
+hash_net_fini(void)
+{
+	ip_set_type_unregister(&hash_net_type);
+}
+
+module_init(hash_net_init);
+module_exit(hash_net_fini);
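The default-cidr initializers in these kadt functions pick the prefix length of the first stored network when one exists and fall back to the host mask otherwise; a plain logical OR cannot express that (it yields only 0 or 1), which is why the `?:` form is used above. A minimal stand-alone user-space sketch of the difference (illustrative only, not part of the patch):

/* default_cidr.c - logical OR vs. conditional fallback */
#include <stdio.h>

#define HOST_MASK 32

int main(void)
{
	unsigned char stored = 24;	/* first net already in the set */
	unsigned char empty = 0;	/* no net stored yet */

	printf("OR:  %u %u\n", stored || HOST_MASK, empty || HOST_MASK);
	/* prints "OR:  1 1" - the prefix length is lost */
	printf("?:   %u %u\n", stored ? stored : HOST_MASK,
	       empty ? empty : HOST_MASK);
	/* prints "?:   24 32" - stored prefix, or the host mask */
	return 0;
}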
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
new file mode 100644
index 0000000..34a1656
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -0,0 +1,578 @@
+/* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net,port type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:net,port type of IP sets");
+MODULE_ALIAS("ip_set_hash:net,port");
+
+/* Type specific function prefix */
+#define TYPE		hash_netport
+
+static bool
+hash_netport_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_netport4_same_set	hash_netport_same_set
+#define hash_netport6_same_set	hash_netport_same_set
+
+/* The type variant functions: IPv4 */
+
+/* Member elements without timeout */
+struct hash_netport4_elem {
+	__be32 ip;
+	__be16 port;
+	u8 proto;
+	u8 cidr;
+};
+
+/* Member elements with timeout support */
+struct hash_netport4_telem {
+	__be32 ip;
+	__be16 port;
+	u8 proto;
+	u8 cidr;
+	unsigned long timeout;
+};
+
+static inline bool
+hash_netport4_data_equal(const struct hash_netport4_elem *ip1,
+			 const struct hash_netport4_elem *ip2)
+{
+	return ip1->ip == ip2->ip &&
+	       ip1->port == ip2->port &&
+	       ip1->proto == ip2->proto &&
+	       ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_netport4_data_isnull(const struct hash_netport4_elem *elem)
+{
+	return elem->proto == 0;
+}
+
+static inline void
+hash_netport4_data_copy(struct hash_netport4_elem *dst,
+			const struct hash_netport4_elem *src)
+{
+	dst->ip = src->ip;
+	dst->port = src->port;
+	dst->proto = src->proto;
+	dst->cidr = src->cidr;
+}
+
+static inline void
+hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr)
+{
+	elem->ip &= ip_set_netmask(cidr);
+	elem->cidr = cidr;
+}
+
+static inline void
+hash_netport4_data_zero_out(struct hash_netport4_elem *elem)
+{
+	elem->proto = 0;
+}
+
+static bool
+hash_netport4_data_list(struct sk_buff *skb,
+			const struct hash_netport4_elem *data)
+{
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static bool
+hash_netport4_data_tlist(struct sk_buff *skb,
+			 const struct hash_netport4_elem *data)
+{
+	const struct hash_netport4_telem *tdata =
+		(const struct hash_netport4_telem *)data;
+
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+		      htonl(ip_set_timeout_get(tdata->timeout)));
+
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+#define IP_SET_HASH_WITH_PROTO
+#define IP_SET_HASH_WITH_NETS
+
+#define PF		4
+#define HOST_MASK	32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
+		   enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netport4_elem data = {
+		.cidr = h->nets[0].cidr ?: HOST_MASK };
+
+	if (data.cidr == 0)
+		return -EINVAL;
+	if (adt == IPSET_TEST)
+		data.cidr = HOST_MASK;
+
+	if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+				 &data.port, &data.proto))
+		return -EINVAL;
+
+	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+	data.ip &= ip_set_netmask(data.cidr);
+
+	return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
+		   enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netport4_elem data = { .cidr = HOST_MASK };
+	u32 port, port_to;
+	u32 timeout = h->timeout;
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR])
+		data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+	if (!data.cidr)
+		return -IPSET_ERR_INVALID_CIDR;
+	data.ip &= ip_set_netmask(data.cidr);
+
+	if (tb[IPSET_ATTR_PORT])
+		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+	else
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_PROTO]) {
+		data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+		if (data.proto == 0)
+			return -IPSET_ERR_INVALID_PROTO;
+	} else
+		return -IPSET_ERR_MISSING_PROTO;
+
+	switch (data.proto) {
+	case IPPROTO_UDP:
+	case IPPROTO_TCP:
+	case IPPROTO_ICMP:
+		break;
+	default:
+		data.port = 0;
+		break;
+	}
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(h->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	if (adt == IPSET_TEST ||
+	    !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+	    !tb[IPSET_ATTR_PORT_TO]) {
+		ret = adtfn(set, &data, timeout);
+		return ip_set_eexist(ret, flags) ? 0 : ret;
+	}
+
+	port = ntohs(data.port);
+	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+	if (port > port_to)
+		swap(port, port_to);
+
+	for (; port <= port_to; port++) {
+		data.port = htons(port);
+		ret = adtfn(set, &data, timeout);
+
+		if (ret && !ip_set_eexist(ret, flags))
+			return ret;
+		else
+			ret = 0;
+	}
+	return ret;
+}
+
+static bool
+hash_netport_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+	const struct ip_set_hash *x = a->data;
+	const struct ip_set_hash *y = b->data;
+
+	/* Resizing changes htable_bits, so we ignore it */
+	return x->maxelem == y->maxelem &&
+	       x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_netport6_elem {
+	union nf_inet_addr ip;
+	__be16 port;
+	u8 proto;
+	u8 cidr;
+};
+
+struct hash_netport6_telem {
+	union nf_inet_addr ip;
+	__be16 port;
+	u8 proto;
+	u8 cidr;
+	unsigned long timeout;
+};
+
+static inline bool
+hash_netport6_data_equal(const struct hash_netport6_elem *ip1,
+			 const struct hash_netport6_elem *ip2)
+{
+	return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+	       ip1->port == ip2->port &&
+	       ip1->proto == ip2->proto &&
+	       ip1->cidr == ip2->cidr;
+}
+
+static inline bool
+hash_netport6_data_isnull(const struct hash_netport6_elem *elem)
+{
+	return elem->proto == 0;
+}
+
+static inline void
+hash_netport6_data_copy(struct hash_netport6_elem *dst,
+			const struct hash_netport6_elem *src)
+{
+	memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_netport6_data_zero_out(struct hash_netport6_elem *elem)
+{
+	elem->proto = 0;
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+	ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+	ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+	ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+	ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr)
+{
+	ip6_netmask(&elem->ip, cidr);
+	elem->cidr = cidr;
+}
+
+static bool
+hash_netport6_data_list(struct sk_buff *skb,
+			const struct hash_netport6_elem *data)
+{
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static bool
+hash_netport6_data_tlist(struct sk_buff *skb,
+			 const struct hash_netport6_elem *data)
+{
+	const struct hash_netport6_telem *e =
+		(const struct hash_netport6_telem *)data;
+
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+	NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+		      htonl(ip_set_timeout_get(e->timeout)));
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF		6
+#define HOST_MASK	128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static int
+hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
+		   enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netport6_elem data = {
+		.cidr = h->nets[0].cidr ?: HOST_MASK };
+
+	if (data.cidr == 0)
+		return -EINVAL;
+	if (adt == IPSET_TEST)
+		data.cidr = HOST_MASK;
+
+	if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+				 &data.port, &data.proto))
+		return -EINVAL;
+
+	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+	ip6_netmask(&data.ip, data.cidr);
+
+	return adtfn(set, &data, h->timeout);
+}
+
+static int
+hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
+		   enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+	const struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netport6_elem data = { .cidr = HOST_MASK };
+	u32 port, port_to;
+	u32 timeout = h->timeout;
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR])
+		data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+	if (!data.cidr)
+		return -IPSET_ERR_INVALID_CIDR;
+	ip6_netmask(&data.ip, data.cidr);
+
+	if (tb[IPSET_ATTR_PORT])
+		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+	else
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_PROTO]) {
+		data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+
+		if (data.proto == 0)
+			return -IPSET_ERR_INVALID_PROTO;
+	} else
+		return -IPSET_ERR_MISSING_PROTO;
+
+	switch (data.proto) {
+	case IPPROTO_UDP:
+	case IPPROTO_TCP:
+	case IPPROTO_ICMPV6:
+		break;
+	default:
+		data.port = 0;
+		break;
+	}
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(h->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	if (adt == IPSET_TEST ||
+	    !(data.proto == IPPROTO_TCP || data.proto == IPPROTO_UDP) ||
+	    !tb[IPSET_ATTR_PORT_TO]) {
+		ret = adtfn(set, &data, timeout);
+		return ip_set_eexist(ret, flags) ? 0 : ret;
+	}
+
+	port = ntohs(data.port);
+	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+	if (port > port_to)
+		swap(port, port_to);
+
+	for (; port <= port_to; port++) {
+		data.port = htons(port);
+		ret = adtfn(set, &data, timeout);
+
+		if (ret && !ip_set_eexist(ret, flags))
+			return ret;
+		else
+			ret = 0;
+	}
+	return ret;
+}
+
+/* Create hash:net,port type of sets */
+
+static int
+hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+	struct ip_set_hash *h;
+	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+	u8 hbits;
+
+	if (!(set->family == AF_INET || set->family == AF_INET6))
+		return -IPSET_ERR_INVALID_FAMILY;
+
+	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_HASHSIZE]) {
+		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+		if (hashsize < IPSET_MIMINAL_HASHSIZE)
+			hashsize = IPSET_MIMINAL_HASHSIZE;
+	}
+
+	if (tb[IPSET_ATTR_MAXELEM])
+		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+	h = kzalloc(sizeof(*h)
+		    + sizeof(struct ip_set_hash_nets)
+		      * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+	if (!h)
+		return -ENOMEM;
+
+	h->maxelem = maxelem;
+	get_random_bytes(&h->initval, sizeof(h->initval));
+	h->timeout = IPSET_NO_TIMEOUT;
+
+	hbits = htable_bits(hashsize);
+	h->table = ip_set_alloc(
+			sizeof(struct htable)
+			+ jhash_size(hbits) * sizeof(struct hbucket));
+	if (!h->table) {
+		kfree(h);
+		return -ENOMEM;
+	}
+	h->table->htable_bits = hbits;
+
+	set->data = h;
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+		set->variant = set->family == AF_INET
+			? &hash_netport4_tvariant : &hash_netport6_tvariant;
+
+		if (set->family == AF_INET)
+			hash_netport4_gc_init(set);
+		else
+			hash_netport6_gc_init(set);
+	} else {
+		set->variant = set->family == AF_INET
+			? &hash_netport4_variant : &hash_netport6_variant;
+	}
+
+	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+		 set->name, jhash_size(h->table->htable_bits),
+		 h->table->htable_bits, h->maxelem, set->data, h->table);
+
+	return 0;
+}
+
+static struct ip_set_type hash_netport_type __read_mostly = {
+	.name		= "hash:net,port",
+	.protocol	= IPSET_PROTOCOL,
+	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT,
+	.dimension	= IPSET_DIM_TWO,
+	.family		= AF_UNSPEC,
+	.revision	= 0,
+	.create		= hash_netport_create,
+	.create_policy	= {
+		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
+		[IPSET_ATTR_MAXELEM]	= { .type = NLA_U32 },
+		[IPSET_ATTR_PROBES]	= { .type = NLA_U8 },
+		[IPSET_ATTR_RESIZE]	= { .type = NLA_U8  },
+		[IPSET_ATTR_PROTO]	= { .type = NLA_U8 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+	},
+	.adt_policy	= {
+		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
+		[IPSET_ATTR_PORT_TO]	= { .type = NLA_U16 },
+		[IPSET_ATTR_PROTO]	= { .type = NLA_U8 },
+		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init
+hash_netport_init(void)
+{
+	return ip_set_type_register(&hash_netport_type);
+}
+
+static void __exit
+hash_netport_fini(void)
+{
+	ip_set_type_unregister(&hash_netport_type);
+}
+
+module_init(hash_netport_init);
+module_exit(hash_netport_fini);
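The uadt routines above expand a PORT/PORT_TO pair into individual elements; note that the loop counters are declared u32 even though the stored port is 16 bits, so the `port <= port_to` comparison still terminates when port_to is 65535. A small stand-alone sketch of that detail (illustrative only):

/* port_range.c - why the range counter is wider than the port */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t from = 65533, to = 65535;
	uint32_t port;	/* 32-bit on purpose; a u16 would wrap and loop */

	for (port = from; port <= to; port++)
		printf("add element for port %u\n", port);
	/* prints ports 65533, 65534, 65535 and exits at port == 65536 */
	return 0;
}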
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
new file mode 100644
index 0000000..a47c329
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -0,0 +1,584 @@
+/* Copyright (C) 2008-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the list:set type */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_list.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("list:set type of IP sets");
+MODULE_ALIAS("ip_set_list:set");
+
+/* Member elements without and with timeout */
+struct set_elem {
+	ip_set_id_t id;
+};
+
+struct set_telem {
+	ip_set_id_t id;
+	unsigned long timeout;
+};
+
+/* Type structure */
+struct list_set {
+	size_t dsize;		/* element size */
+	u32 size;		/* size of set list array */
+	u32 timeout;		/* timeout value */
+	struct timer_list gc;	/* garbage collection */
+	struct set_elem members[0]; /* the set members */
+};
+
+static inline struct set_elem *
+list_set_elem(const struct list_set *map, u32 id)
+{
+	return (struct set_elem *)((char *)map->members + id * map->dsize);
+}
+
+static inline bool
+list_set_timeout(const struct list_set *map, u32 id)
+{
+	const struct set_telem *elem =
+		(const struct set_telem *) list_set_elem(map, id);
+
+	return ip_set_timeout_test(elem->timeout);
+}
+
+static inline bool
+list_set_expired(const struct list_set *map, u32 id)
+{
+	const struct set_telem *elem =
+		(const struct set_telem *) list_set_elem(map, id);
+
+	return ip_set_timeout_expired(elem->timeout);
+}
+
+static inline int
+list_set_exist(const struct set_telem *elem)
+{
+	return elem->id != IPSET_INVALID_ID &&
+	       !ip_set_timeout_expired(elem->timeout);
+}
+
+/* Set list without and with timeout */
+
+static int
+list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
+	      enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+{
+	struct list_set *map = set->data;
+	struct set_elem *elem;
+	u32 i;
+	int ret;
+
+	for (i = 0; i < map->size; i++) {
+		elem = list_set_elem(map, i);
+		if (elem->id == IPSET_INVALID_ID)
+			return 0;
+		if (with_timeout(map->timeout) && list_set_expired(map, i))
+			continue;
+		switch (adt) {
+		case IPSET_TEST:
+			ret = ip_set_test(elem->id, skb, pf, dim, flags);
+			if (ret > 0)
+				return ret;
+			break;
+		case IPSET_ADD:
+			ret = ip_set_add(elem->id, skb, pf, dim, flags);
+			if (ret == 0)
+				return ret;
+			break;
+		case IPSET_DEL:
+			ret = ip_set_del(elem->id, skb, pf, dim, flags);
+			if (ret == 0)
+				return ret;
+			break;
+		default:
+			break;
+		}
+	}
+	return -EINVAL;
+}
+
+static bool
+next_id_eq(const struct list_set *map, u32 i, ip_set_id_t id)
+{
+	const struct set_elem *elem;
+
+	if (i + 1 < map->size) {
+		elem = list_set_elem(map, i + 1);
+		return !!(elem->id == id &&
+			  !(with_timeout(map->timeout) &&
+			    list_set_expired(map, i + 1)));
+	}
+
+	return 0;
+}
+
+static void
+list_elem_add(struct list_set *map, u32 i, ip_set_id_t id)
+{
+	struct set_elem *e;
+
+	for (; i < map->size; i++) {
+		e = list_set_elem(map, i);
+		swap(e->id, id);
+		if (e->id == IPSET_INVALID_ID)
+			break;
+	}
+}
+
+static void
+list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id,
+	       unsigned long timeout)
+{
+	struct set_telem *e;
+
+	for (; i < map->size; i++) {
+		e = (struct set_telem *)list_set_elem(map, i);
+		swap(e->id, id);
+		if (e->id == IPSET_INVALID_ID)
+			break;
+		swap(e->timeout, timeout);
+	}
+}
+
+static int
+list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
+	     unsigned long timeout)
+{
+	const struct set_elem *e = list_set_elem(map, i);
+
+	if (i == map->size - 1 && e->id != IPSET_INVALID_ID)
+		/* Last element replaced: e.g. add new,before,last */
+		ip_set_put_byindex(e->id);
+	if (with_timeout(map->timeout))
+		list_elem_tadd(map, i, id, timeout);
+	else
+		list_elem_add(map, i, id);
+
+	return 0;
+}
+
+static int
+list_set_del(struct list_set *map, ip_set_id_t id, u32 i)
+{
+	struct set_elem *a = list_set_elem(map, i), *b;
+
+	ip_set_put_byindex(id);
+
+	for (; i < map->size - 1; i++) {
+		b = list_set_elem(map, i + 1);
+		a->id = b->id;
+		if (with_timeout(map->timeout))
+			((struct set_telem *)a)->timeout =
+				((struct set_telem *)b)->timeout;
+		a = b;
+		if (a->id == IPSET_INVALID_ID)
+			break;
+	}
+	/* Last element */
+	a->id = IPSET_INVALID_ID;
+	return 0;
+}
+
+static int
+list_set_uadt(struct ip_set *set, struct nlattr *tb[],
+	      enum ipset_adt adt, u32 *lineno, u32 flags)
+{
+	struct list_set *map = set->data;
+	bool with_timeout = with_timeout(map->timeout);
+	int before = 0;
+	u32 timeout = map->timeout;
+	ip_set_id_t id, refid = IPSET_INVALID_ID;
+	const struct set_elem *elem;
+	struct ip_set *s;
+	u32 i;
+	int ret = 0;
+
+	if (unlikely(!tb[IPSET_ATTR_NAME] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	id = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAME]), &s);
+	if (id == IPSET_INVALID_ID)
+		return -IPSET_ERR_NAME;
+	/* "Loop detection" */
+	if (s->type->features & IPSET_TYPE_NAME) {
+		ret = -IPSET_ERR_LOOP;
+		goto finish;
+	}
+
+	if (tb[IPSET_ATTR_CADT_FLAGS]) {
+		u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+		before = f & IPSET_FLAG_BEFORE;
+	}
+
+	if (before && !tb[IPSET_ATTR_NAMEREF]) {
+		ret = -IPSET_ERR_BEFORE;
+		goto finish;
+	}
+
+	if (tb[IPSET_ATTR_NAMEREF]) {
+		refid = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAMEREF]),
+					  &s);
+		if (refid == IPSET_INVALID_ID) {
+			ret = -IPSET_ERR_NAMEREF;
+			goto finish;
+		}
+		if (!before)
+			before = -1;
+	}
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout) {
+			ret = -IPSET_ERR_TIMEOUT;
+			goto finish;
+		}
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	switch (adt) {
+	case IPSET_TEST:
+		for (i = 0; i < map->size && !ret; i++) {
+			elem = list_set_elem(map, i);
+			if (elem->id == IPSET_INVALID_ID ||
+			    (before != 0 && i + 1 >= map->size))
+				break;
+			else if (with_timeout && list_set_expired(map, i))
+				continue;
+			else if (before > 0 && elem->id == id)
+				ret = next_id_eq(map, i, refid);
+			else if (before < 0 && elem->id == refid)
+				ret = next_id_eq(map, i, id);
+			else if (before == 0 && elem->id == id)
+				ret = 1;
+		}
+		break;
+	case IPSET_ADD:
+		for (i = 0; i < map->size && !ret; i++) {
+			elem = list_set_elem(map, i);
+			if (elem->id == id &&
+			    !(with_timeout && list_set_expired(map, i)))
+				ret = -IPSET_ERR_EXIST;
+		}
+		if (ret == -IPSET_ERR_EXIST)
+			break;
+		ret = -IPSET_ERR_LIST_FULL;
+		for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
+			elem = list_set_elem(map, i);
+			if (elem->id == IPSET_INVALID_ID)
+				ret = before != 0 ? -IPSET_ERR_REF_EXIST
+					: list_set_add(map, i, id, timeout);
+			else if (elem->id != refid)
+				continue;
+			else if (with_timeout && list_set_expired(map, i))
+				ret = -IPSET_ERR_REF_EXIST;
+			else if (before)
+				ret = list_set_add(map, i, id, timeout);
+			else if (i + 1 < map->size)
+				ret = list_set_add(map, i + 1, id, timeout);
+		}
+		break;
+	case IPSET_DEL:
+		ret = -IPSET_ERR_EXIST;
+		for (i = 0; i < map->size && ret == -IPSET_ERR_EXIST; i++) {
+			elem = list_set_elem(map, i);
+			if (elem->id == IPSET_INVALID_ID) {
+				ret = before != 0 ? -IPSET_ERR_REF_EXIST
+						  : -IPSET_ERR_EXIST;
+				break;
+			} else if (with_timeout && list_set_expired(map, i))
+				continue;
+			else if (elem->id == id &&
+				 (before == 0 ||
+				  (before > 0 &&
+				   next_id_eq(map, i, refid))))
+				ret = list_set_del(map, id, i);
+			else if (before < 0 &&
+				 elem->id == refid &&
+				 next_id_eq(map, i, id))
+				ret = list_set_del(map, id, i + 1);
+		}
+		break;
+	default:
+		break;
+	}
+
+finish:
+	if (refid != IPSET_INVALID_ID)
+		ip_set_put_byindex(refid);
+	if (adt != IPSET_ADD || ret)
+		ip_set_put_byindex(id);
+
+	return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static void
+list_set_flush(struct ip_set *set)
+{
+	struct list_set *map = set->data;
+	struct set_elem *elem;
+	u32 i;
+
+	for (i = 0; i < map->size; i++) {
+		elem = list_set_elem(map, i);
+		if (elem->id != IPSET_INVALID_ID) {
+			ip_set_put_byindex(elem->id);
+			elem->id = IPSET_INVALID_ID;
+		}
+	}
+}
+
+static void
+list_set_destroy(struct ip_set *set)
+{
+	struct list_set *map = set->data;
+
+	if (with_timeout(map->timeout))
+		del_timer_sync(&map->gc);
+	list_set_flush(set);
+	kfree(map);
+
+	set->data = NULL;
+}
+
+static int
+list_set_head(struct ip_set *set, struct sk_buff *skb)
+{
+	const struct list_set *map = set->data;
+	struct nlattr *nested;
+
+	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+	if (!nested)
+		goto nla_put_failure;
+	NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
+	if (with_timeout(map->timeout))
+		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
+		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
+		      htonl(sizeof(*map) + map->size * map->dsize));
+	ipset_nest_end(skb, nested);
+
+	return 0;
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int
+list_set_list(const struct ip_set *set,
+	      struct sk_buff *skb, struct netlink_callback *cb)
+{
+	const struct list_set *map = set->data;
+	struct nlattr *atd, *nested;
+	u32 i, first = cb->args[2];
+	const struct set_elem *e;
+
+	atd = ipset_nest_start(skb, IPSET_ATTR_ADT);
+	if (!atd)
+		return -EMSGSIZE;
+	for (; cb->args[2] < map->size; cb->args[2]++) {
+		i = cb->args[2];
+		e = list_set_elem(map, i);
+		if (e->id == IPSET_INVALID_ID)
+			goto finish;
+		if (with_timeout(map->timeout) && list_set_expired(map, i))
+			continue;
+		nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
+		if (!nested) {
+			if (i == first) {
+				nla_nest_cancel(skb, atd);
+				return -EMSGSIZE;
+			} else
+				goto nla_put_failure;
+		}
+		NLA_PUT_STRING(skb, IPSET_ATTR_NAME,
+			       ip_set_name_byindex(e->id));
+		if (with_timeout(map->timeout)) {
+			const struct set_telem *te =
+				(const struct set_telem *) e;
+			NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+				      htonl(ip_set_timeout_get(te->timeout)));
+		}
+		ipset_nest_end(skb, nested);
+	}
+finish:
+	ipset_nest_end(skb, atd);
+	/* Set listing finished */
+	cb->args[2] = 0;
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, nested);
+	ipset_nest_end(skb, atd);
+	if (unlikely(i == first)) {
+		cb->args[2] = 0;
+		return -EMSGSIZE;
+	}
+	return 0;
+}
+
+static bool
+list_set_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+	const struct list_set *x = a->data;
+	const struct list_set *y = b->data;
+
+	return x->size == y->size &&
+	       x->timeout == y->timeout;
+}
+
+static const struct ip_set_type_variant list_set = {
+	.kadt	= list_set_kadt,
+	.uadt	= list_set_uadt,
+	.destroy = list_set_destroy,
+	.flush	= list_set_flush,
+	.head	= list_set_head,
+	.list	= list_set_list,
+	.same_set = list_set_same_set,
+};
+
+static void
+list_set_gc(unsigned long ul_set)
+{
+	struct ip_set *set = (struct ip_set *) ul_set;
+	struct list_set *map = set->data;
+	struct set_telem *e;
+	u32 i;
+
+	/* We run parallel with other readers (test element)
+	 * but adding/deleting new entries is locked out */
+	read_lock_bh(&set->lock);
+	for (i = map->size; i > 0; i--) {
+		e = (struct set_telem *) list_set_elem(map, i - 1);
+		if (e->id != IPSET_INVALID_ID &&
+		    list_set_expired(map, i - 1))
+			list_set_del(map, e->id, i - 1);
+	}
+	read_unlock_bh(&set->lock);
+
+	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+	add_timer(&map->gc);
+}
+
+static void
+list_set_gc_init(struct ip_set *set)
+{
+	struct list_set *map = set->data;
+
+	init_timer(&map->gc);
+	map->gc.data = (unsigned long) set;
+	map->gc.function = list_set_gc;
+	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+	add_timer(&map->gc);
+}
+
+/* Create list:set type of sets */
+
+static bool
+init_list_set(struct ip_set *set, u32 size, size_t dsize,
+	      unsigned long timeout)
+{
+	struct list_set *map;
+	struct set_elem *e;
+	u32 i;
+
+	map = kzalloc(sizeof(*map) + size * dsize, GFP_KERNEL);
+	if (!map)
+		return false;
+
+	map->size = size;
+	map->dsize = dsize;
+	map->timeout = timeout;
+	set->data = map;
+
+	for (i = 0; i < size; i++) {
+		e = list_set_elem(map, i);
+		e->id = IPSET_INVALID_ID;
+	}
+
+	return true;
+}
+
+static int
+list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+	u32 size = IP_SET_LIST_DEFAULT_SIZE;
+
+	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_SIZE])
+		size = ip_set_get_h32(tb[IPSET_ATTR_SIZE]);
+	if (size < IP_SET_LIST_MIN_SIZE)
+		size = IP_SET_LIST_MIN_SIZE;
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!init_list_set(set, size, sizeof(struct set_telem),
+				   ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT])))
+			return -ENOMEM;
+
+		list_set_gc_init(set);
+	} else {
+		if (!init_list_set(set, size, sizeof(struct set_elem),
+				   IPSET_NO_TIMEOUT))
+			return -ENOMEM;
+	}
+	set->variant = &list_set;
+	return 0;
+}
+
+static struct ip_set_type list_set_type __read_mostly = {
+	.name		= "list:set",
+	.protocol	= IPSET_PROTOCOL,
+	.features	= IPSET_TYPE_NAME | IPSET_DUMP_LAST,
+	.dimension	= IPSET_DIM_ONE,
+	.family		= AF_UNSPEC,
+	.revision	= 0,
+	.create		= list_set_create,
+	.create_policy	= {
+		[IPSET_ATTR_SIZE]	= { .type = NLA_U32 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+	},
+	.adt_policy	= {
+		[IPSET_ATTR_NAME]	= { .type = NLA_STRING,
+					    .len = IPSET_MAXNAMELEN },
+		[IPSET_ATTR_NAMEREF]	= { .type = NLA_STRING,
+					    .len = IPSET_MAXNAMELEN },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
+		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init
+list_set_init(void)
+{
+	return ip_set_type_register(&list_set_type);
+}
+
+static void __exit
+list_set_fini(void)
+{
+	ip_set_type_unregister(&list_set_type);
+}
+
+module_init(list_set_init);
+module_exit(list_set_fini);
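
The timeout variant above relies on a self-rearming kernel timer for garbage collection: list_set_gc_init() arms the timer and list_set_gc() sweeps expired slots under the read lock, then re-arms itself every IPSET_GC_PERIOD(timeout) seconds. A minimal sketch of that pattern follows; the my_* names and MY_GC_PERIOD are hypothetical, only the timer API calls mirror the patch.

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	#define MY_GC_PERIOD	10	/* seconds between sweeps; example value */

	struct my_map {
		struct timer_list gc;
		/* ... entries swept by the collector ... */
	};

	static void my_gc(unsigned long ul_map)
	{
		struct my_map *map = (struct my_map *) ul_map;

		/* drop expired entries here, under the appropriate lock */

		/* re-arm: the timer keeps firing until del_timer_sync() */
		map->gc.expires = jiffies + MY_GC_PERIOD * HZ;
		add_timer(&map->gc);
	}

	static void my_gc_init(struct my_map *map)
	{
		init_timer(&map->gc);
		map->gc.data = (unsigned long) map;
		map->gc.function = my_gc;
		map->gc.expires = jiffies + MY_GC_PERIOD * HZ;
		add_timer(&map->gc);
	}
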
diff --git a/net/netfilter/ipset/pfxlen.c b/net/netfilter/ipset/pfxlen.c
new file mode 100644
index 0000000..23f8c81
--- /dev/null
+++ b/net/netfilter/ipset/pfxlen.c
@@ -0,0 +1,291 @@
+#include <linux/netfilter/ipset/pfxlen.h>
+
+/*
+ * Prefixlen maps for fast conversions, by Jan Engelhardt.
+ */
+
+#define E(a, b, c, d) \
+	{.ip6 = { \
+		__constant_htonl(a), __constant_htonl(b), \
+		__constant_htonl(c), __constant_htonl(d), \
+	} }
+
+/*
+ * This table works for both IPv4 and IPv6;
+ * just use ip_set_netmask_map[prefixlength].ip.
+ */
+const union nf_inet_addr ip_set_netmask_map[] = {
+	E(0x00000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0x80000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xC0000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xE0000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xF0000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xF8000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFC000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFE000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFF000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFF800000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFC000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
+};
+EXPORT_SYMBOL_GPL(ip_set_netmask_map);
+
+#undef  E
+#define E(a, b, c, d) 						\
+	{.ip6 = { (__force __be32) a, (__force __be32) b,	\
+		  (__force __be32) c, (__force __be32) d,	\
+	} }
+
+/*
+ * This table works for both IPv4 and IPv6;
+ * just use ip_set_hostmask_map[prefixlength].ip.
+ */
+const union nf_inet_addr ip_set_hostmask_map[] = {
+	E(0x00000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0x80000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xC0000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xE0000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xF0000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xF8000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFC000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFE000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFF000000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFF800000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFC000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE),
+	E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
+};
+EXPORT_SYMBOL_GPL(ip_set_hostmask_map);
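
Both tables map a prefix length straight to the corresponding mask: index 0..32 for IPv4 (take .ip) and 0..128 for IPv6 (take .ip6). ip_set_netmask_map stores the masks in network byte order via __constant_htonl(), while ip_set_hostmask_map holds the same bit patterns force-cast from host order. A minimal lookup sketch under those observations; the example_* helpers are hypothetical, only the arrays come from this file.

	#include <linux/netfilter/ipset/pfxlen.h>

	static inline __be32 example_v4_netmask(u8 pfxlen)	/* pfxlen 0..32 */
	{
		/* e.g. pfxlen 24 yields htonl(0xFFFFFF00) */
		return ip_set_netmask_map[pfxlen].ip;
	}

	static inline const __be32 *example_v6_netmask(u8 pfxlen)	/* pfxlen 0..128 */
	{
		return ip_set_netmask_map[pfxlen].ip6;
	}
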
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index a475ede..5c48ffb 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -43,11 +43,6 @@
 EXPORT_SYMBOL(unregister_ip_vs_app);
 EXPORT_SYMBOL(register_ip_vs_app_inc);
 
-/* ipvs application list head */
-static LIST_HEAD(ip_vs_app_list);
-static DEFINE_MUTEX(__ip_vs_app_mutex);
-
-
 /*
  *	Get an ip_vs_app object
  */
@@ -67,7 +62,8 @@
  *	Allocate/initialize app incarnation and register it in proto apps.
  */
 static int
-ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
+ip_vs_app_inc_new(struct net *net, struct ip_vs_app *app, __u16 proto,
+		  __u16 port)
 {
 	struct ip_vs_protocol *pp;
 	struct ip_vs_app *inc;
@@ -98,7 +94,7 @@
 		}
 	}
 
-	ret = pp->register_app(inc);
+	ret = pp->register_app(net, inc);
 	if (ret)
 		goto out;
 
@@ -119,7 +115,7 @@
  *	Release app incarnation
  */
 static void
-ip_vs_app_inc_release(struct ip_vs_app *inc)
+ip_vs_app_inc_release(struct net *net, struct ip_vs_app *inc)
 {
 	struct ip_vs_protocol *pp;
 
@@ -127,7 +123,7 @@
 		return;
 
 	if (pp->unregister_app)
-		pp->unregister_app(inc);
+		pp->unregister_app(net, inc);
 
 	IP_VS_DBG(9, "%s App %s:%u unregistered\n",
 		  pp->name, inc->name, ntohs(inc->port));
@@ -168,15 +164,17 @@
  *	Register an application incarnation in protocol applications
  */
 int
-register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
+register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
+		       __u16 port)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
 	int result;
 
-	mutex_lock(&__ip_vs_app_mutex);
+	mutex_lock(&ipvs->app_mutex);
 
-	result = ip_vs_app_inc_new(app, proto, port);
+	result = ip_vs_app_inc_new(net, app, proto, port);
 
-	mutex_unlock(&__ip_vs_app_mutex);
+	mutex_unlock(&ipvs->app_mutex);
 
 	return result;
 }
@@ -185,16 +183,17 @@
 /*
  *	ip_vs_app registration routine
  */
-int register_ip_vs_app(struct ip_vs_app *app)
+int register_ip_vs_app(struct net *net, struct ip_vs_app *app)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
 	/* increase the module use count */
 	ip_vs_use_count_inc();
 
-	mutex_lock(&__ip_vs_app_mutex);
+	mutex_lock(&ipvs->app_mutex);
 
-	list_add(&app->a_list, &ip_vs_app_list);
+	list_add(&app->a_list, &ipvs->app_list);
 
-	mutex_unlock(&__ip_vs_app_mutex);
+	mutex_unlock(&ipvs->app_mutex);
 
 	return 0;
 }
@@ -204,19 +203,20 @@
  *	ip_vs_app unregistration routine
  *	We are sure there are no app incarnations attached to services
  */
-void unregister_ip_vs_app(struct ip_vs_app *app)
+void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_app *inc, *nxt;
 
-	mutex_lock(&__ip_vs_app_mutex);
+	mutex_lock(&ipvs->app_mutex);
 
 	list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
-		ip_vs_app_inc_release(inc);
+		ip_vs_app_inc_release(net, inc);
 	}
 
 	list_del(&app->a_list);
 
-	mutex_unlock(&__ip_vs_app_mutex);
+	mutex_unlock(&ipvs->app_mutex);
 
 	/* decrease the module use count */
 	ip_vs_use_count_dec();
@@ -226,7 +226,8 @@
 /*
  *	Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
  */
-int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp)
+int ip_vs_bind_app(struct ip_vs_conn *cp,
+		   struct ip_vs_protocol *pp)
 {
 	return pp->app_conn_bind(cp);
 }
@@ -481,11 +482,11 @@
  *	/proc/net/ip_vs_app entry function
  */
 
-static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
+static struct ip_vs_app *ip_vs_app_idx(struct netns_ipvs *ipvs, loff_t pos)
 {
 	struct ip_vs_app *app, *inc;
 
-	list_for_each_entry(app, &ip_vs_app_list, a_list) {
+	list_for_each_entry(app, &ipvs->app_list, a_list) {
 		list_for_each_entry(inc, &app->incs_list, a_list) {
 			if (pos-- == 0)
 				return inc;
@@ -497,19 +498,24 @@
 
 static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	mutex_lock(&__ip_vs_app_mutex);
+	struct net *net = seq_file_net(seq);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
-	return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN;
+	mutex_lock(&ipvs->app_mutex);
+
+	return *pos ? ip_vs_app_idx(ipvs, *pos - 1) : SEQ_START_TOKEN;
 }
 
 static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct ip_vs_app *inc, *app;
 	struct list_head *e;
+	struct net *net = seq_file_net(seq);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	++*pos;
 	if (v == SEQ_START_TOKEN)
-		return ip_vs_app_idx(0);
+		return ip_vs_app_idx(ipvs, 0);
 
 	inc = v;
 	app = inc->app;
@@ -518,7 +524,7 @@
 		return list_entry(e, struct ip_vs_app, a_list);
 
 	/* go on to next application */
-	for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) {
+	for (e = app->a_list.next; e != &ipvs->app_list; e = e->next) {
 		app = list_entry(e, struct ip_vs_app, a_list);
 		list_for_each_entry(inc, &app->incs_list, a_list) {
 			return inc;
@@ -529,7 +535,9 @@
 
 static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
 {
-	mutex_unlock(&__ip_vs_app_mutex);
+	struct netns_ipvs *ipvs = net_ipvs(seq_file_net(seq));
+
+	mutex_unlock(&ipvs->app_mutex);
 }
 
 static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
@@ -557,7 +565,8 @@
 
 static int ip_vs_app_open(struct inode *inode, struct file *file)
 {
-	return seq_open(file, &ip_vs_app_seq_ops);
+	return seq_open_net(inode, file, &ip_vs_app_seq_ops,
+			    sizeof(struct seq_net_private));
 }
 
 static const struct file_operations ip_vs_app_fops = {
@@ -569,15 +578,36 @@
 };
 #endif
 
+static int __net_init __ip_vs_app_init(struct net *net)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	INIT_LIST_HEAD(&ipvs->app_list);
+	__mutex_init(&ipvs->app_mutex, "ipvs->app_mutex", &ipvs->app_key);
+	proc_net_fops_create(net, "ip_vs_app", 0, &ip_vs_app_fops);
+	return 0;
+}
+
+static void __net_exit __ip_vs_app_cleanup(struct net *net)
+{
+	proc_net_remove(net, "ip_vs_app");
+}
+
+static struct pernet_operations ip_vs_app_ops = {
+	.init = __ip_vs_app_init,
+	.exit = __ip_vs_app_cleanup,
+};
+
 int __init ip_vs_app_init(void)
 {
-	/* we will replace it with proc_net_ipvs_create() soon */
-	proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
-	return 0;
+	int rv;
+
+	rv = register_pernet_subsys(&ip_vs_app_ops);
+	return rv;
 }
 
 
 void ip_vs_app_cleanup(void)
 {
-	proc_net_remove(&init_net, "ip_vs_app");
+	unregister_pernet_subsys(&ip_vs_app_ops);
 }
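
The /proc handling above becomes namespace-aware by switching from seq_open() to seq_open_net(): the per-open state must begin with struct seq_net_private so that seq_file_net() can recover the opener's namespace inside the seq handlers, and the matching release side is seq_release_net(). A minimal sketch under those assumptions; all example_* names are hypothetical.

	#include <linux/fs.h>
	#include <linux/seq_file.h>
	#include <net/net_namespace.h>

	struct example_iter_state {
		struct seq_net_private p;	/* must be the first member */
		int bucket;			/* whatever the walker needs */
	};

	static void *example_seq_start(struct seq_file *seq, loff_t *pos)
	{
		return *pos ? NULL : SEQ_START_TOKEN;
	}

	static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
	{
		++*pos;
		return NULL;
	}

	static void example_seq_stop(struct seq_file *seq, void *v)
	{
	}

	static int example_seq_show(struct seq_file *seq, void *v)
	{
		struct net *net = seq_file_net(seq);	/* opener's namespace */

		/* print only entries that belong to 'net' */
		seq_printf(seq, "netns %p\n", net);
		return 0;
	}

	static const struct seq_operations example_seq_ops = {
		.start = example_seq_start,
		.next  = example_seq_next,
		.stop  = example_seq_stop,
		.show  = example_seq_show,
	};

	static int example_open(struct inode *inode, struct file *file)
	{
		return seq_open_net(inode, file, &example_seq_ops,
				    sizeof(struct example_iter_state));
	}
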
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index e9adecd..83233fe 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -48,35 +48,32 @@
 /*
  * Connection hash size. Default is what was selected at compile time.
 */
-int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
+static int ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
 module_param_named(conn_tab_bits, ip_vs_conn_tab_bits, int, 0444);
 MODULE_PARM_DESC(conn_tab_bits, "Set connections' hash size");
 
 /* size and mask values */
-int ip_vs_conn_tab_size;
-int ip_vs_conn_tab_mask;
+int ip_vs_conn_tab_size __read_mostly;
+static int ip_vs_conn_tab_mask __read_mostly;
 
 /*
  *  Connection hash table: for input and output packets lookups of IPVS
  */
-static struct list_head *ip_vs_conn_tab;
+static struct list_head *ip_vs_conn_tab __read_mostly;
 
 /*  SLAB cache for IPVS connections */
 static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
 
-/*  counter for current IPVS connections */
-static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
-
 /*  counter for no client port connections */
 static atomic_t ip_vs_conn_no_cport_cnt = ATOMIC_INIT(0);
 
 /* random value for IPVS connection hash */
-static unsigned int ip_vs_conn_rnd;
+static unsigned int ip_vs_conn_rnd __read_mostly;
 
 /*
  *  Fine locking granularity for big connection hash table
  */
-#define CT_LOCKARRAY_BITS  4
+#define CT_LOCKARRAY_BITS  5
 #define CT_LOCKARRAY_SIZE  (1<<CT_LOCKARRAY_BITS)
 #define CT_LOCKARRAY_MASK  (CT_LOCKARRAY_SIZE-1)
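
Raising CT_LOCKARRAY_BITS from 4 to 5 doubles the number of rwlocks striped over the connection hash table: buckets do not get one lock each, they share 1 << CT_LOCKARRAY_BITS locks selected by the low bits of the bucket index. A rough sketch of the striping idea; the example_* names are hypothetical, and each lock must still be rwlock_init()ed at startup, as ip_vs_conn_init() does for the real array.

	#include <linux/spinlock.h>

	#define EXAMPLE_LOCKARRAY_BITS	5
	#define EXAMPLE_LOCKARRAY_SIZE	(1 << EXAMPLE_LOCKARRAY_BITS)
	#define EXAMPLE_LOCKARRAY_MASK	(EXAMPLE_LOCKARRAY_SIZE - 1)

	static rwlock_t example_lock_array[EXAMPLE_LOCKARRAY_SIZE];

	static inline void example_bucket_read_lock(unsigned int hash)
	{
		read_lock(&example_lock_array[hash & EXAMPLE_LOCKARRAY_MASK]);
	}

	static inline void example_bucket_read_unlock(unsigned int hash)
	{
		read_unlock(&example_lock_array[hash & EXAMPLE_LOCKARRAY_MASK]);
	}
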
 
@@ -133,19 +130,19 @@
 /*
  *	Returns hash value for IPVS connection entry
  */
-static unsigned int ip_vs_conn_hashkey(int af, unsigned proto,
+static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned proto,
 				       const union nf_inet_addr *addr,
 				       __be16 port)
 {
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6)
-		return jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
-				    (__force u32)port, proto, ip_vs_conn_rnd)
-			& ip_vs_conn_tab_mask;
+		return (jhash_3words(jhash(addr, 16, ip_vs_conn_rnd),
+				    (__force u32)port, proto, ip_vs_conn_rnd) ^
+			((size_t)net>>8)) & ip_vs_conn_tab_mask;
 #endif
-	return jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
-			    ip_vs_conn_rnd)
-		& ip_vs_conn_tab_mask;
+	return (jhash_3words((__force u32)addr->ip, (__force u32)port, proto,
+			    ip_vs_conn_rnd) ^
+		((size_t)net>>8)) & ip_vs_conn_tab_mask;
 }
 
 static unsigned int ip_vs_conn_hashkey_param(const struct ip_vs_conn_param *p,
@@ -166,18 +163,18 @@
 		port = p->vport;
 	}
 
-	return ip_vs_conn_hashkey(p->af, p->protocol, addr, port);
+	return ip_vs_conn_hashkey(p->net, p->af, p->protocol, addr, port);
 }
 
 static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
 {
 	struct ip_vs_conn_param p;
 
-	ip_vs_conn_fill_param(cp->af, cp->protocol, &cp->caddr, cp->cport,
-			      NULL, 0, &p);
+	ip_vs_conn_fill_param(ip_vs_conn_net(cp), cp->af, cp->protocol,
+			      &cp->caddr, cp->cport, NULL, 0, &p);
 
-	if (cp->dest && cp->dest->svc->pe) {
-		p.pe = cp->dest->svc->pe;
+	if (cp->pe) {
+		p.pe = cp->pe;
 		p.pe_data = cp->pe_data;
 		p.pe_data_len = cp->pe_data_len;
 	}
@@ -186,7 +183,7 @@
 }
 
 /*
- *	Hashes ip_vs_conn in ip_vs_conn_tab by proto,addr,port.
+ *	Hashes ip_vs_conn in ip_vs_conn_tab by netns,proto,addr,port.
  *	returns bool success.
  */
 static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
@@ -269,11 +266,12 @@
 
 	list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
 		if (cp->af == p->af &&
+		    p->cport == cp->cport && p->vport == cp->vport &&
 		    ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
 		    ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
-		    p->cport == cp->cport && p->vport == cp->vport &&
 		    ((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
-		    p->protocol == cp->protocol) {
+		    p->protocol == cp->protocol &&
+		    ip_vs_conn_net_eq(cp, p->net)) {
 			/* HIT */
 			atomic_inc(&cp->refcnt);
 			ct_read_unlock(hash);
@@ -313,23 +311,23 @@
 			    struct ip_vs_conn_param *p)
 {
 	__be16 _ports[2], *pptr;
+	struct net *net = skb_net(skb);
 
 	pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
 	if (pptr == NULL)
 		return 1;
 
 	if (likely(!inverse))
-		ip_vs_conn_fill_param(af, iph->protocol, &iph->saddr, pptr[0],
-				      &iph->daddr, pptr[1], p);
+		ip_vs_conn_fill_param(net, af, iph->protocol, &iph->saddr,
+				      pptr[0], &iph->daddr, pptr[1], p);
 	else
-		ip_vs_conn_fill_param(af, iph->protocol, &iph->daddr, pptr[1],
-				      &iph->saddr, pptr[0], p);
+		ip_vs_conn_fill_param(net, af, iph->protocol, &iph->daddr,
+				      pptr[1], &iph->saddr, pptr[0], p);
 	return 0;
 }
 
 struct ip_vs_conn *
 ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
-			struct ip_vs_protocol *pp,
 			const struct ip_vs_iphdr *iph,
 			unsigned int proto_off, int inverse)
 {
@@ -353,8 +351,10 @@
 	ct_read_lock(hash);
 
 	list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+		if (!ip_vs_conn_net_eq(cp, p->net))
+			continue;
 		if (p->pe_data && p->pe->ct_match) {
-			if (p->pe->ct_match(p, cp))
+			if (p->pe == cp->pe && p->pe->ct_match(p, cp))
 				goto out;
 			continue;
 		}
@@ -404,10 +404,11 @@
 
 	list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
 		if (cp->af == p->af &&
+		    p->vport == cp->cport && p->cport == cp->dport &&
 		    ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
 		    ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) &&
-		    p->vport == cp->cport && p->cport == cp->dport &&
-		    p->protocol == cp->protocol) {
+		    p->protocol == cp->protocol &&
+		    ip_vs_conn_net_eq(cp, p->net)) {
 			/* HIT */
 			atomic_inc(&cp->refcnt);
 			ret = cp;
@@ -428,7 +429,6 @@
 
 struct ip_vs_conn *
 ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
-			 struct ip_vs_protocol *pp,
 			 const struct ip_vs_iphdr *iph,
 			 unsigned int proto_off, int inverse)
 {
@@ -611,9 +611,9 @@
 	struct ip_vs_dest *dest;
 
 	if ((cp) && (!cp->dest)) {
-		dest = ip_vs_find_dest(cp->af, &cp->daddr, cp->dport,
-				       &cp->vaddr, cp->vport,
-				       cp->protocol);
+		dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
+				       cp->dport, &cp->vaddr, cp->vport,
+				       cp->protocol, cp->fwmark);
 		ip_vs_bind_dest(cp, dest);
 		return dest;
 	} else
@@ -686,13 +686,14 @@
 int ip_vs_check_template(struct ip_vs_conn *ct)
 {
 	struct ip_vs_dest *dest = ct->dest;
+	struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(ct));
 
 	/*
 	 * Checking the dest server status.
 	 */
 	if ((dest == NULL) ||
 	    !(dest->flags & IP_VS_DEST_F_AVAILABLE) ||
-	    (sysctl_ip_vs_expire_quiescent_template &&
+	    (ipvs->sysctl_expire_quiescent_template &&
 	     (atomic_read(&dest->weight) == 0))) {
 		IP_VS_DBG_BUF(9, "check_template: dest not available for "
 			      "protocol %s s:%s:%d v:%s:%d "
@@ -730,6 +731,7 @@
 static void ip_vs_conn_expire(unsigned long data)
 {
 	struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
+	struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
 
 	cp->timeout = 60*HZ;
 
@@ -765,13 +767,14 @@
 		if (cp->flags & IP_VS_CONN_F_NFCT)
 			ip_vs_conn_drop_conntrack(cp);
 
+		ip_vs_pe_put(cp->pe);
 		kfree(cp->pe_data);
 		if (unlikely(cp->app != NULL))
 			ip_vs_unbind_app(cp);
 		ip_vs_unbind_dest(cp);
 		if (cp->flags & IP_VS_CONN_F_NO_CPORT)
 			atomic_dec(&ip_vs_conn_no_cport_cnt);
-		atomic_dec(&ip_vs_conn_count);
+		atomic_dec(&ipvs->conn_count);
 
 		kmem_cache_free(ip_vs_conn_cachep, cp);
 		return;
@@ -802,10 +805,12 @@
 struct ip_vs_conn *
 ip_vs_conn_new(const struct ip_vs_conn_param *p,
 	       const union nf_inet_addr *daddr, __be16 dport, unsigned flags,
-	       struct ip_vs_dest *dest)
+	       struct ip_vs_dest *dest, __u32 fwmark)
 {
 	struct ip_vs_conn *cp;
-	struct ip_vs_protocol *pp = ip_vs_proto_get(p->protocol);
+	struct netns_ipvs *ipvs = net_ipvs(p->net);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->net,
+							   p->protocol);
 
 	cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC);
 	if (cp == NULL) {
@@ -815,6 +820,7 @@
 
 	INIT_LIST_HEAD(&cp->c_list);
 	setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
+	ip_vs_conn_net_set(cp, p->net);
 	cp->af		   = p->af;
 	cp->protocol	   = p->protocol;
 	ip_vs_addr_copy(p->af, &cp->caddr, p->caddr);
@@ -826,7 +832,10 @@
 			&cp->daddr, daddr);
 	cp->dport          = dport;
 	cp->flags	   = flags;
-	if (flags & IP_VS_CONN_F_TEMPLATE && p->pe_data) {
+	cp->fwmark         = fwmark;
+	if (flags & IP_VS_CONN_F_TEMPLATE && p->pe) {
+		ip_vs_pe_get(p->pe);
+		cp->pe = p->pe;
 		cp->pe_data = p->pe_data;
 		cp->pe_data_len = p->pe_data_len;
 	}
@@ -842,7 +851,7 @@
 	atomic_set(&cp->n_control, 0);
 	atomic_set(&cp->in_pkts, 0);
 
-	atomic_inc(&ip_vs_conn_count);
+	atomic_inc(&ipvs->conn_count);
 	if (flags & IP_VS_CONN_F_NO_CPORT)
 		atomic_inc(&ip_vs_conn_no_cport_cnt);
 
@@ -861,8 +870,8 @@
 #endif
 		ip_vs_bind_xmit(cp);
 
-	if (unlikely(pp && atomic_read(&pp->appcnt)))
-		ip_vs_bind_app(cp, pp);
+	if (unlikely(pd && atomic_read(&pd->appcnt)))
+		ip_vs_bind_app(cp, pd->pp);
 
 	/*
 	 * Allow conntrack to be preserved. By default, conntrack
@@ -871,7 +880,7 @@
 	 * IP_VS_CONN_F_ONE_PACKET too.
 	 */
 
-	if (ip_vs_conntrack_enabled())
+	if (ip_vs_conntrack_enabled(ipvs))
 		cp->flags |= IP_VS_CONN_F_NFCT;
 
 	/* Hash it in the ip_vs_conn_tab finally */
@@ -884,17 +893,22 @@
  *	/proc/net/ip_vs_conn entries
  */
 #ifdef CONFIG_PROC_FS
+struct ip_vs_iter_state {
+	struct seq_net_private p;
+	struct list_head *l;
+};
 
 static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
 {
 	int idx;
 	struct ip_vs_conn *cp;
+	struct ip_vs_iter_state *iter = seq->private;
 
 	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
 		ct_read_lock_bh(idx);
 		list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
 			if (pos-- == 0) {
-				seq->private = &ip_vs_conn_tab[idx];
+				iter->l = &ip_vs_conn_tab[idx];
 			return cp;
 			}
 		}
@@ -906,14 +920,17 @@
 
 static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	seq->private = NULL;
+	struct ip_vs_iter_state *iter = seq->private;
+
+	iter->l = NULL;
 	return *pos ? ip_vs_conn_array(seq, *pos - 1) :SEQ_START_TOKEN;
 }
 
 static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct ip_vs_conn *cp = v;
-	struct list_head *e, *l = seq->private;
+	struct ip_vs_iter_state *iter = seq->private;
+	struct list_head *e, *l = iter->l;
 	int idx;
 
 	++*pos;
@@ -930,18 +947,19 @@
 	while (++idx < ip_vs_conn_tab_size) {
 		ct_read_lock_bh(idx);
 		list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
-			seq->private = &ip_vs_conn_tab[idx];
+			iter->l = &ip_vs_conn_tab[idx];
 			return cp;
 		}
 		ct_read_unlock_bh(idx);
 	}
-	seq->private = NULL;
+	iter->l = NULL;
 	return NULL;
 }
 
 static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
 {
-	struct list_head *l = seq->private;
+	struct ip_vs_iter_state *iter = seq->private;
+	struct list_head *l = iter->l;
 
 	if (l)
 		ct_read_unlock_bh(l - ip_vs_conn_tab);
@@ -955,18 +973,19 @@
    "Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Expires PEName PEData\n");
 	else {
 		const struct ip_vs_conn *cp = v;
+		struct net *net = seq_file_net(seq);
 		char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3];
 		size_t len = 0;
 
-		if (cp->dest && cp->pe_data &&
-		    cp->dest->svc->pe->show_pe_data) {
+		if (!ip_vs_conn_net_eq(cp, net))
+			return 0;
+		if (cp->pe_data) {
 			pe_data[0] = ' ';
-			len = strlen(cp->dest->svc->pe->name);
-			memcpy(pe_data + 1, cp->dest->svc->pe->name, len);
+			len = strlen(cp->pe->name);
+			memcpy(pe_data + 1, cp->pe->name, len);
 			pe_data[len + 1] = ' ';
 			len += 2;
-			len += cp->dest->svc->pe->show_pe_data(cp,
-							       pe_data + len);
+			len += cp->pe->show_pe_data(cp, pe_data + len);
 		}
 		pe_data[len] = '\0';
 
@@ -1004,7 +1023,8 @@
 
 static int ip_vs_conn_open(struct inode *inode, struct file *file)
 {
-	return seq_open(file, &ip_vs_conn_seq_ops);
+	return seq_open_net(inode, file, &ip_vs_conn_seq_ops,
+			    sizeof(struct ip_vs_iter_state));
 }
 
 static const struct file_operations ip_vs_conn_fops = {
@@ -1031,6 +1051,10 @@
    "Pro FromIP   FPrt ToIP     TPrt DestIP   DPrt State       Origin Expires\n");
 	else {
 		const struct ip_vs_conn *cp = v;
+		struct net *net = seq_file_net(seq);
+
+		if (!ip_vs_conn_net_eq(cp, net))
+			return 0;
 
 #ifdef CONFIG_IP_VS_IPV6
 		if (cp->af == AF_INET6)
@@ -1067,7 +1091,8 @@
 
 static int ip_vs_conn_sync_open(struct inode *inode, struct file *file)
 {
-	return seq_open(file, &ip_vs_conn_sync_seq_ops);
+	return seq_open_net(inode, file, &ip_vs_conn_sync_seq_ops,
+			    sizeof(struct ip_vs_iter_state));
 }
 
 static const struct file_operations ip_vs_conn_sync_fops = {
@@ -1113,7 +1138,7 @@
 }
 
 /* Called from keventd and must protect itself from softirqs */
-void ip_vs_random_dropentry(void)
+void ip_vs_random_dropentry(struct net *net)
 {
 	int idx;
 	struct ip_vs_conn *cp;
@@ -1133,7 +1158,8 @@
 			if (cp->flags & IP_VS_CONN_F_TEMPLATE)
 				/* connection template */
 				continue;
-
+			if (!ip_vs_conn_net_eq(cp, net))
+				continue;
 			if (cp->protocol == IPPROTO_TCP) {
 				switch(cp->state) {
 				case IP_VS_TCP_S_SYN_RECV:
@@ -1168,12 +1194,13 @@
 /*
  *      Flush all the connection entries in the ip_vs_conn_tab
  */
-static void ip_vs_conn_flush(void)
+static void ip_vs_conn_flush(struct net *net)
 {
 	int idx;
 	struct ip_vs_conn *cp;
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
-  flush_again:
+flush_again:
 	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
 		/*
 		 *  Lock is actually needed in this loop.
@@ -1181,7 +1208,8 @@
 		ct_write_lock_bh(idx);
 
 		list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
-
+			if (!ip_vs_conn_net_eq(cp, net))
+				continue;
 			IP_VS_DBG(4, "del connection\n");
 			ip_vs_conn_expire_now(cp);
 			if (cp->control) {
@@ -1194,16 +1222,41 @@
 
 	/* the counter may be not NULL, because maybe some conn entries
 	   are run by slow timer handler or unhashed but still referred */
-	if (atomic_read(&ip_vs_conn_count) != 0) {
+	if (atomic_read(&ipvs->conn_count) != 0) {
 		schedule();
 		goto flush_again;
 	}
 }
+/*
+ * per netns init and exit
+ */
+int __net_init __ip_vs_conn_init(struct net *net)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
+	atomic_set(&ipvs->conn_count, 0);
+
+	proc_net_fops_create(net, "ip_vs_conn", 0, &ip_vs_conn_fops);
+	proc_net_fops_create(net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
+	return 0;
+}
+
+static void __net_exit __ip_vs_conn_cleanup(struct net *net)
+{
+	/* flush all the connection entries first */
+	ip_vs_conn_flush(net);
+	proc_net_remove(net, "ip_vs_conn");
+	proc_net_remove(net, "ip_vs_conn_sync");
+}
+static struct pernet_operations ipvs_conn_ops = {
+	.init = __ip_vs_conn_init,
+	.exit = __ip_vs_conn_cleanup,
+};
 
 int __init ip_vs_conn_init(void)
 {
 	int idx;
+	int retc;
 
 	/* Compute size and mask */
 	ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
@@ -1241,24 +1294,18 @@
 		rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
 	}
 
-	proc_net_fops_create(&init_net, "ip_vs_conn", 0, &ip_vs_conn_fops);
-	proc_net_fops_create(&init_net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
+	retc = register_pernet_subsys(&ipvs_conn_ops);
 
 	/* calculate the random value for connection hash */
 	get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));
 
-	return 0;
+	return retc;
 }
 
-
 void ip_vs_conn_cleanup(void)
 {
-	/* flush all the connection entries first */
-	ip_vs_conn_flush();
-
+	unregister_pernet_subsys(&ipvs_conn_ops);
 	/* Release the empty cache */
 	kmem_cache_destroy(ip_vs_conn_cachep);
-	proc_net_remove(&init_net, "ip_vs_conn");
-	proc_net_remove(&init_net, "ip_vs_conn_sync");
 	vfree(ip_vs_conn_tab);
 }
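
With the connection table shared between namespaces, the patch folds the struct net pointer into the hash key (shifted right to discard the always-zero low pointer bits) so that identical tuples from different namespaces tend to land in different buckets; lookups still compare the namespace explicitly via ip_vs_conn_net_eq(), since the hash is only a hint. A sketch of that key construction; example_hashkey() is hypothetical, the mixing expression mirrors the patch.

	#include <linux/jhash.h>
	#include <net/net_namespace.h>

	static unsigned int example_hashkey(struct net *net, __be32 addr,
					    __be16 port, __u16 proto,
					    unsigned int mask, u32 rnd)
	{
		return (jhash_3words((__force u32)addr, (__force u32)port,
				     proto, rnd) ^
			((size_t)net >> 8)) & mask;
	}
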
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index b4e51e9..4d06617 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -41,6 +41,7 @@
 #include <net/icmp.h>                   /* for icmp_send */
 #include <net/route.h>
 #include <net/ip6_checksum.h>
+#include <net/netns/generic.h>		/* net_generic() */
 
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv4.h>
@@ -68,6 +69,12 @@
 EXPORT_SYMBOL(ip_vs_get_debug_level);
 #endif
 
+int ip_vs_net_id __read_mostly;
+#ifdef IP_VS_GENERIC_NETNS
+EXPORT_SYMBOL(ip_vs_net_id);
+#endif
+/* netns cnt used for uniqueness */
+static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
 
 /* ID used in ICMP lookups */
 #define icmp_id(icmph)          (((icmph)->un).echo.id)
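
ip_vs_net_id above is a net_generic() id: when a pernet_operations registration supplies .id and .size, the core allocates a private area for every namespace and the id indexes it later. IPVS itself reaches its per-net state through net_ipvs(), so this is only a sketch of the generic mechanism the id is meant for; the example_* names are hypothetical.

	#include <net/net_namespace.h>
	#include <net/netns/generic.h>

	struct example_pernet {
		int some_state;
	};

	static int example_net_id __read_mostly;

	static int __net_init example_init(struct net *net)
	{
		struct example_pernet *p = net_generic(net, example_net_id);

		p->some_state = 0;
		return 0;
	}

	static struct pernet_operations example_ops = {
		.init	= example_init,
		.id	= &example_net_id,
		.size	= sizeof(struct example_pernet),
	};

	/* register_pernet_subsys(&example_ops) fills in example_net_id */
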
@@ -108,21 +115,28 @@
 ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 {
 	struct ip_vs_dest *dest = cp->dest;
+	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+
 	if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
-		spin_lock(&dest->stats.lock);
-		dest->stats.ustats.inpkts++;
-		dest->stats.ustats.inbytes += skb->len;
-		spin_unlock(&dest->stats.lock);
+		struct ip_vs_cpu_stats *s;
 
-		spin_lock(&dest->svc->stats.lock);
-		dest->svc->stats.ustats.inpkts++;
-		dest->svc->stats.ustats.inbytes += skb->len;
-		spin_unlock(&dest->svc->stats.lock);
+		s = this_cpu_ptr(dest->stats.cpustats);
+		s->ustats.inpkts++;
+		u64_stats_update_begin(&s->syncp);
+		s->ustats.inbytes += skb->len;
+		u64_stats_update_end(&s->syncp);
 
-		spin_lock(&ip_vs_stats.lock);
-		ip_vs_stats.ustats.inpkts++;
-		ip_vs_stats.ustats.inbytes += skb->len;
-		spin_unlock(&ip_vs_stats.lock);
+		s = this_cpu_ptr(dest->svc->stats.cpustats);
+		s->ustats.inpkts++;
+		u64_stats_update_begin(&s->syncp);
+		s->ustats.inbytes += skb->len;
+		u64_stats_update_end(&s->syncp);
+
+		s = this_cpu_ptr(ipvs->cpustats);
+		s->ustats.inpkts++;
+		u64_stats_update_begin(&s->syncp);
+		s->ustats.inbytes += skb->len;
+		u64_stats_update_end(&s->syncp);
 	}
 }
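
The stats conversion above replaces the globally spinlocked counters with per-cpu counters: the writer only bumps its own CPU's copy inside u64_stats_update_begin()/end(). The other half of the pattern is the reader, which samples each CPU's counters under u64_stats_fetch_begin()/retry() and sums them into a consistent 64-bit total. A sketch under that assumption; struct example_cpu_stats and its field are hypothetical stand-ins for struct ip_vs_cpu_stats.

	#include <linux/percpu.h>
	#include <linux/u64_stats_sync.h>

	struct example_cpu_stats {
		u64			inbytes;
		struct u64_stats_sync	syncp;
	};

	static u64 example_sum_inbytes(struct example_cpu_stats __percpu *stats)
	{
		u64 sum = 0;
		int cpu;

		for_each_possible_cpu(cpu) {
			struct example_cpu_stats *s = per_cpu_ptr(stats, cpu);
			unsigned int start;
			u64 bytes;

			do {
				start = u64_stats_fetch_begin(&s->syncp);
				bytes = s->inbytes;
			} while (u64_stats_fetch_retry(&s->syncp, start));

			sum += bytes;
		}
		return sum;
	}
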
 
@@ -131,21 +145,28 @@
 ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb)
 {
 	struct ip_vs_dest *dest = cp->dest;
+	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+
 	if (dest && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
-		spin_lock(&dest->stats.lock);
-		dest->stats.ustats.outpkts++;
-		dest->stats.ustats.outbytes += skb->len;
-		spin_unlock(&dest->stats.lock);
+		struct ip_vs_cpu_stats *s;
 
-		spin_lock(&dest->svc->stats.lock);
-		dest->svc->stats.ustats.outpkts++;
-		dest->svc->stats.ustats.outbytes += skb->len;
-		spin_unlock(&dest->svc->stats.lock);
+		s = this_cpu_ptr(dest->stats.cpustats);
+		s->ustats.outpkts++;
+		u64_stats_update_begin(&s->syncp);
+		s->ustats.outbytes += skb->len;
+		u64_stats_update_end(&s->syncp);
 
-		spin_lock(&ip_vs_stats.lock);
-		ip_vs_stats.ustats.outpkts++;
-		ip_vs_stats.ustats.outbytes += skb->len;
-		spin_unlock(&ip_vs_stats.lock);
+		s = this_cpu_ptr(dest->svc->stats.cpustats);
+		s->ustats.outpkts++;
+		u64_stats_update_begin(&s->syncp);
+		s->ustats.outbytes += skb->len;
+		u64_stats_update_end(&s->syncp);
+
+		s = this_cpu_ptr(ipvs->cpustats);
+		s->ustats.outpkts++;
+		u64_stats_update_begin(&s->syncp);
+		s->ustats.outbytes += skb->len;
+		u64_stats_update_end(&s->syncp);
 	}
 }
 
@@ -153,41 +174,44 @@
 static inline void
 ip_vs_conn_stats(struct ip_vs_conn *cp, struct ip_vs_service *svc)
 {
-	spin_lock(&cp->dest->stats.lock);
-	cp->dest->stats.ustats.conns++;
-	spin_unlock(&cp->dest->stats.lock);
+	struct netns_ipvs *ipvs = net_ipvs(svc->net);
+	struct ip_vs_cpu_stats *s;
 
-	spin_lock(&svc->stats.lock);
-	svc->stats.ustats.conns++;
-	spin_unlock(&svc->stats.lock);
+	s = this_cpu_ptr(cp->dest->stats.cpustats);
+	s->ustats.conns++;
 
-	spin_lock(&ip_vs_stats.lock);
-	ip_vs_stats.ustats.conns++;
-	spin_unlock(&ip_vs_stats.lock);
+	s = this_cpu_ptr(svc->stats.cpustats);
+	s->ustats.conns++;
+
+	s = this_cpu_ptr(ipvs->cpustats);
+	s->ustats.conns++;
 }
 
 
 static inline int
 ip_vs_set_state(struct ip_vs_conn *cp, int direction,
 		const struct sk_buff *skb,
-		struct ip_vs_protocol *pp)
+		struct ip_vs_proto_data *pd)
 {
-	if (unlikely(!pp->state_transition))
+	if (unlikely(!pd->pp->state_transition))
 		return 0;
-	return pp->state_transition(cp, direction, skb, pp);
+	return pd->pp->state_transition(cp, direction, skb, pd);
 }
 
-static inline void
+static inline int
 ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
 			      struct sk_buff *skb, int protocol,
 			      const union nf_inet_addr *caddr, __be16 cport,
 			      const union nf_inet_addr *vaddr, __be16 vport,
 			      struct ip_vs_conn_param *p)
 {
-	ip_vs_conn_fill_param(svc->af, protocol, caddr, cport, vaddr, vport, p);
+	ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr,
+			      vport, p);
 	p->pe = svc->pe;
 	if (p->pe && p->pe->fill_param)
-		p->pe->fill_param(p, skb);
+		return p->pe->fill_param(p, skb);
+
+	return 0;
 }
 
 /*
@@ -200,7 +224,7 @@
 static struct ip_vs_conn *
 ip_vs_sched_persist(struct ip_vs_service *svc,
 		    struct sk_buff *skb,
-		    __be16 ports[2])
+		    __be16 src_port, __be16 dst_port, int *ignored)
 {
 	struct ip_vs_conn *cp = NULL;
 	struct ip_vs_iphdr iph;
@@ -224,8 +248,8 @@
 
 	IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u "
 		      "mnet %s\n",
-		      IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(ports[0]),
-		      IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(ports[1]),
+		      IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(src_port),
+		      IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(dst_port),
 		      IP_VS_DBG_ADDR(svc->af, &snet));
 
 	/*
@@ -247,14 +271,14 @@
 		const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) };
 		__be16 vport = 0;
 
-		if (ports[1] == svc->port) {
+		if (dst_port == svc->port) {
 			/* non-FTP template:
 			 * <protocol, caddr, 0, vaddr, vport, daddr, dport>
 			 * FTP template:
 			 * <protocol, caddr, 0, vaddr, 0, daddr, 0>
 			 */
 			if (svc->port != FTPPORT)
-				vport = ports[1];
+				vport = dst_port;
 		} else {
 			/* Note: persistent fwmark-based services and
 			 * persistent port zero service are handled here.
@@ -268,24 +292,31 @@
 				vaddr = &fwmark;
 			}
 		}
-		ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
-					      vaddr, vport, &param);
+		/* return *ignored = -1 so NF_DROP can be used */
+		if (ip_vs_conn_fill_param_persist(svc, skb, protocol, &snet, 0,
+						  vaddr, vport, &param) < 0) {
+			*ignored = -1;
+			return NULL;
+		}
 	}
 
 	/* Check if a template already exists */
 	ct = ip_vs_ct_in_get(&param);
 	if (!ct || !ip_vs_check_template(ct)) {
-		/* No template found or the dest of the connection
+		/*
+		 * No template found or the dest of the connection
 		 * template is not available.
+		 * return *ignored=0 i.e. ICMP and NF_DROP
 		 */
 		dest = svc->scheduler->schedule(svc, skb);
 		if (!dest) {
 			IP_VS_DBG(1, "p-schedule: no dest found.\n");
 			kfree(param.pe_data);
+			*ignored = 0;
 			return NULL;
 		}
 
-		if (ports[1] == svc->port && svc->port != FTPPORT)
+		if (dst_port == svc->port && svc->port != FTPPORT)
 			dport = dest->port;
 
 		/* Create a template
@@ -293,9 +324,10 @@
 		 * and thus param.pe_data will be destroyed
 		 * when the template expires */
 		ct = ip_vs_conn_new(&param, &dest->addr, dport,
-				    IP_VS_CONN_F_TEMPLATE, dest);
+				    IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
 		if (ct == NULL) {
 			kfree(param.pe_data);
+			*ignored = -1;
 			return NULL;
 		}
 
@@ -306,7 +338,7 @@
 		kfree(param.pe_data);
 	}
 
-	dport = ports[1];
+	dport = dst_port;
 	if (dport == svc->port && dest->port)
 		dport = dest->port;
 
@@ -317,11 +349,13 @@
 	/*
 	 *    Create a new connection according to the template
 	 */
-	ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr, ports[0],
-			      &iph.daddr, ports[1], &param);
-	cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest);
+	ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, &iph.saddr,
+			      src_port, &iph.daddr, dst_port, &param);
+
+	cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest, skb->mark);
 	if (cp == NULL) {
 		ip_vs_conn_put(ct);
+		*ignored = -1;
 		return NULL;
 	}
 
@@ -341,11 +375,27 @@
  *  It selects a server according to the virtual service, and
  *  creates a connection entry.
  *  Protocols supported: TCP, UDP
+ *
+ *  Usage of *ignored
+ *
+ * 1 :   protocol tried to schedule (e.g. on SYN), found svc but the
+ *       svc/scheduler decides that this packet should be accepted with
+ *       NF_ACCEPT because it must not be scheduled.
+ *
+ * 0 :   scheduler cannot find a destination, so try bypass or
+ *       return ICMP and then NF_DROP (ip_vs_leave).
+ *
+ * -1 :  scheduler tried to schedule but a fatal error occurred, e.g.
+ *       ip_vs_conn_new failure (ENOMEM) or ip_vs_sip_fill_param
+ *       failure such as a missing Call-ID, or ENOMEM on skb_linearize
+ *       or pe_data. In this case we should return NF_DROP without
+ *       any attempt to send ICMP with ip_vs_leave.
  */
 struct ip_vs_conn *
 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
-	       struct ip_vs_protocol *pp, int *ignored)
+	       struct ip_vs_proto_data *pd, int *ignored)
 {
+	struct ip_vs_protocol *pp = pd->pp;
 	struct ip_vs_conn *cp = NULL;
 	struct ip_vs_iphdr iph;
 	struct ip_vs_dest *dest;
@@ -371,12 +421,10 @@
 	}
 
 	/*
-	 * Do not schedule replies from local real server. It is risky
-	 * for fwmark services but mostly for persistent services.
+	 *    Do not schedule replies from local real server.
 	 */
 	if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
-	    (svc->flags & IP_VS_SVC_F_PERSISTENT || svc->fwmark) &&
-	    (cp = pp->conn_in_get(svc->af, skb, pp, &iph, iph.len, 1))) {
+	    (cp = pp->conn_in_get(svc->af, skb, &iph, iph.len, 1))) {
 		IP_VS_DBG_PKT(12, svc->af, pp, skb, 0,
 			      "Not scheduling reply for existing connection");
 		__ip_vs_conn_put(cp);
@@ -386,10 +434,10 @@
 	/*
 	 *    Persistent service
 	 */
-	if (svc->flags & IP_VS_SVC_F_PERSISTENT) {
-		*ignored = 0;
-		return ip_vs_sched_persist(svc, skb, pptr);
-	}
+	if (svc->flags & IP_VS_SVC_F_PERSISTENT)
+		return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored);
+
+	*ignored = 0;
 
 	/*
 	 *    Non-persistent service
@@ -402,8 +450,6 @@
 		return NULL;
 	}
 
-	*ignored = 0;
-
 	dest = svc->scheduler->schedule(svc, skb);
 	if (dest == NULL) {
 		IP_VS_DBG(1, "Schedule: no dest found.\n");
@@ -419,13 +465,17 @@
 	 */
 	{
 		struct ip_vs_conn_param p;
-		ip_vs_conn_fill_param(svc->af, iph.protocol, &iph.saddr,
-				      pptr[0], &iph.daddr, pptr[1], &p);
+
+		ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
+				      &iph.saddr, pptr[0], &iph.daddr, pptr[1],
+				      &p);
 		cp = ip_vs_conn_new(&p, &dest->addr,
 				    dest->port ? dest->port : pptr[1],
-				    flags, dest);
-		if (!cp)
+				    flags, dest, skb->mark);
+		if (!cp) {
+			*ignored = -1;
 			return NULL;
+		}
 	}
 
 	IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
@@ -447,11 +497,14 @@
  *  no destination is available for a new connection.
  */
 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
-		struct ip_vs_protocol *pp)
+		struct ip_vs_proto_data *pd)
 {
+	struct net *net;
+	struct netns_ipvs *ipvs;
 	__be16 _ports[2], *pptr;
 	struct ip_vs_iphdr iph;
 	int unicast;
+
 	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
 
 	pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
@@ -459,18 +512,20 @@
 		ip_vs_service_put(svc);
 		return NF_DROP;
 	}
+	net = skb_net(skb);
 
 #ifdef CONFIG_IP_VS_IPV6
 	if (svc->af == AF_INET6)
 		unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST;
 	else
 #endif
-		unicast = (inet_addr_type(&init_net, iph.daddr.ip) == RTN_UNICAST);
+		unicast = (inet_addr_type(net, iph.daddr.ip) == RTN_UNICAST);
 
 	/* if it is fwmark-based service, the cache_bypass sysctl is up
 	   and the destination is a non-local unicast, then create
 	   a cache_bypass connection entry */
-	if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) {
+	ipvs = net_ipvs(net);
+	if (ipvs->sysctl_cache_bypass && svc->fwmark && unicast) {
 		int ret, cs;
 		struct ip_vs_conn *cp;
 		unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
@@ -484,12 +539,12 @@
 		IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
 		{
 			struct ip_vs_conn_param p;
-			ip_vs_conn_fill_param(svc->af, iph.protocol,
+			ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol,
 					      &iph.saddr, pptr[0],
 					      &iph.daddr, pptr[1], &p);
 			cp = ip_vs_conn_new(&p, &daddr, 0,
 					    IP_VS_CONN_F_BYPASS | flags,
-					    NULL);
+					    NULL, skb->mark);
 			if (!cp)
 				return NF_DROP;
 		}
@@ -498,10 +553,10 @@
 		ip_vs_in_stats(cp, skb);
 
 		/* set state */
-		cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp);
+		cs = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
 
 		/* transmit the first SYN packet */
-		ret = cp->packet_xmit(skb, cp, pp);
+		ret = cp->packet_xmit(skb, cp, pd->pp);
 		/* do not touch skb anymore */
 
 		atomic_inc(&cp->in_pkts);
@@ -682,6 +737,7 @@
 				struct ip_vs_protocol *pp,
 				unsigned int offset, unsigned int ihl)
 {
+	struct netns_ipvs *ipvs;
 	unsigned int verdict = NF_DROP;
 
 	if (IP_VS_FWD_METHOD(cp) != 0) {
@@ -703,6 +759,8 @@
 	if (!skb_make_writable(skb, offset))
 		goto out;
 
+	ipvs = net_ipvs(skb_net(skb));
+
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6)
 		ip_vs_nat_icmp_v6(skb, pp, cp, 1);
@@ -712,11 +770,11 @@
 
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6) {
-		if (sysctl_ip_vs_snat_reroute && ip6_route_me_harder(skb) != 0)
+		if (ipvs->sysctl_snat_reroute && ip6_route_me_harder(skb) != 0)
 			goto out;
 	} else
 #endif
-		if ((sysctl_ip_vs_snat_reroute ||
+		if ((ipvs->sysctl_snat_reroute ||
 		     skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
 		    ip_route_me_harder(skb, RTN_LOCAL) != 0)
 			goto out;
@@ -808,7 +866,7 @@
 
 	ip_vs_fill_iphdr(AF_INET, cih, &ciph);
 	/* The embedded headers contain source and dest in reverse order */
-	cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
+	cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1);
 	if (!cp)
 		return NF_ACCEPT;
 
@@ -885,7 +943,7 @@
 
 	ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
 	/* The embedded headers contain source and dest in reverse order */
-	cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
+	cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1);
 	if (!cp)
 		return NF_ACCEPT;
 
@@ -924,9 +982,12 @@
  * Used for NAT and local client.
  */
 static unsigned int
-handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
 		struct ip_vs_conn *cp, int ihl)
 {
+	struct ip_vs_protocol *pp = pd->pp;
+	struct netns_ipvs *ipvs;
+
 	IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet");
 
 	if (!skb_make_writable(skb, ihl))
@@ -961,13 +1022,15 @@
 	 * if it came from this machine itself.  So re-compute
 	 * the routing information.
 	 */
+	ipvs = net_ipvs(skb_net(skb));
+
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6) {
-		if (sysctl_ip_vs_snat_reroute && ip6_route_me_harder(skb) != 0)
+		if (ipvs->sysctl_snat_reroute && ip6_route_me_harder(skb) != 0)
 			goto drop;
 	} else
 #endif
-		if ((sysctl_ip_vs_snat_reroute ||
+		if ((ipvs->sysctl_snat_reroute ||
 		     skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
 		    ip_route_me_harder(skb, RTN_LOCAL) != 0)
 			goto drop;
@@ -975,7 +1038,7 @@
 	IP_VS_DBG_PKT(10, af, pp, skb, 0, "After SNAT");
 
 	ip_vs_out_stats(cp, skb);
-	ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
+	ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
 	skb->ipvs_property = 1;
 	if (!(cp->flags & IP_VS_CONN_F_NFCT))
 		ip_vs_notrack(skb);
@@ -999,9 +1062,12 @@
 static unsigned int
 ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
 {
+	struct net *net = NULL;
 	struct ip_vs_iphdr iph;
 	struct ip_vs_protocol *pp;
+	struct ip_vs_proto_data *pd;
 	struct ip_vs_conn *cp;
+	struct netns_ipvs *ipvs;
 
 	EnterFunction(11);
 
@@ -1022,6 +1088,7 @@
 	if (unlikely(!skb_dst(skb)))
 		return NF_ACCEPT;
 
+	net = skb_net(skb);
 	ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
 #ifdef CONFIG_IP_VS_IPV6
 	if (af == AF_INET6) {
@@ -1045,9 +1112,10 @@
 			ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
 		}
 
-	pp = ip_vs_proto_get(iph.protocol);
-	if (unlikely(!pp))
+	pd = ip_vs_proto_data_get(net, iph.protocol);
+	if (unlikely(!pd))
 		return NF_ACCEPT;
+	pp = pd->pp;
 
 	/* reassemble IP fragments */
 #ifdef CONFIG_IP_VS_IPV6
@@ -1073,11 +1141,12 @@
 	/*
 	 * Check if the packet belongs to an existing entry
 	 */
-	cp = pp->conn_out_get(af, skb, pp, &iph, iph.len, 0);
+	cp = pp->conn_out_get(af, skb, &iph, iph.len, 0);
+	ipvs = net_ipvs(net);
 
 	if (likely(cp))
-		return handle_response(af, skb, pp, cp, iph.len);
-	if (sysctl_ip_vs_nat_icmp_send &&
+		return handle_response(af, skb, pd, cp, iph.len);
+	if (ipvs->sysctl_nat_icmp_send &&
 	    (pp->protocol == IPPROTO_TCP ||
 	     pp->protocol == IPPROTO_UDP ||
 	     pp->protocol == IPPROTO_SCTP)) {
@@ -1087,7 +1156,7 @@
 					  sizeof(_ports), _ports);
 		if (pptr == NULL)
 			return NF_ACCEPT;	/* Not for me */
-		if (ip_vs_lookup_real_service(af, iph.protocol,
+		if (ip_vs_lookup_real_service(net, af, iph.protocol,
 					      &iph.saddr,
 					      pptr[0])) {
 			/*
@@ -1202,12 +1271,14 @@
 static int
 ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 {
+	struct net *net = NULL;
 	struct iphdr *iph;
 	struct icmphdr	_icmph, *ic;
 	struct iphdr	_ciph, *cih;	/* The ip header contained within the ICMP */
 	struct ip_vs_iphdr ciph;
 	struct ip_vs_conn *cp;
 	struct ip_vs_protocol *pp;
+	struct ip_vs_proto_data *pd;
 	unsigned int offset, ihl, verdict;
 	union nf_inet_addr snet;
 
@@ -1249,9 +1320,11 @@
 	if (cih == NULL)
 		return NF_ACCEPT; /* The packet looks wrong, ignore */
 
-	pp = ip_vs_proto_get(cih->protocol);
-	if (!pp)
+	net = skb_net(skb);
+	pd = ip_vs_proto_data_get(net, cih->protocol);
+	if (!pd)
 		return NF_ACCEPT;
+	pp = pd->pp;
 
 	/* Is the embedded protocol header present? */
 	if (unlikely(cih->frag_off & htons(IP_OFFSET) &&
@@ -1265,10 +1338,10 @@
 
 	ip_vs_fill_iphdr(AF_INET, cih, &ciph);
 	/* The embedded headers contain source and dest in reverse order */
-	cp = pp->conn_in_get(AF_INET, skb, pp, &ciph, offset, 1);
+	cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1);
 	if (!cp) {
 		/* The packet could also belong to a local client */
-		cp = pp->conn_out_get(AF_INET, skb, pp, &ciph, offset, 1);
+		cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1);
 		if (cp) {
 			snet.ip = iph->saddr;
 			return handle_response_icmp(AF_INET, skb, &snet,
@@ -1312,6 +1385,7 @@
 static int
 ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
 {
+	struct net *net = NULL;
 	struct ipv6hdr *iph;
 	struct icmp6hdr	_icmph, *ic;
 	struct ipv6hdr	_ciph, *cih;	/* The ip header contained
@@ -1319,6 +1393,7 @@
 	struct ip_vs_iphdr ciph;
 	struct ip_vs_conn *cp;
 	struct ip_vs_protocol *pp;
+	struct ip_vs_proto_data *pd;
 	unsigned int offset, verdict;
 	union nf_inet_addr snet;
 	struct rt6_info *rt;
@@ -1361,9 +1436,11 @@
 	if (cih == NULL)
 		return NF_ACCEPT; /* The packet looks wrong, ignore */
 
-	pp = ip_vs_proto_get(cih->nexthdr);
-	if (!pp)
+	net = skb_net(skb);
+	pd = ip_vs_proto_data_get(net, cih->nexthdr);
+	if (!pd)
 		return NF_ACCEPT;
+	pp = pd->pp;
 
 	/* Is the embedded protocol header present? */
 	/* TODO: we don't support fragmentation at the moment anyways */
@@ -1377,10 +1454,10 @@
 
 	ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
 	/* The embedded headers contain source and dest in reverse order */
-	cp = pp->conn_in_get(AF_INET6, skb, pp, &ciph, offset, 1);
+	cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1);
 	if (!cp) {
 		/* The packet could also belong to a local client */
-		cp = pp->conn_out_get(AF_INET6, skb, pp, &ciph, offset, 1);
+		cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1);
 		if (cp) {
 			ipv6_addr_copy(&snet.in6, &iph->saddr);
 			return handle_response_icmp(AF_INET6, skb, &snet,
@@ -1423,10 +1500,13 @@
 static unsigned int
 ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
 {
+	struct net *net;
 	struct ip_vs_iphdr iph;
 	struct ip_vs_protocol *pp;
+	struct ip_vs_proto_data *pd;
 	struct ip_vs_conn *cp;
 	int ret, restart, pkts;
+	struct netns_ipvs *ipvs;
 
 	/* Already marked as IPVS request or reply? */
 	if (skb->ipvs_property)
@@ -1480,20 +1560,21 @@
 			ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
 		}
 
+	net = skb_net(skb);
 	/* Protocol supported? */
-	pp = ip_vs_proto_get(iph.protocol);
-	if (unlikely(!pp))
+	pd = ip_vs_proto_data_get(net, iph.protocol);
+	if (unlikely(!pd))
 		return NF_ACCEPT;
-
+	pp = pd->pp;
 	/*
 	 * Check if the packet belongs to an existing connection entry
 	 */
-	cp = pp->conn_in_get(af, skb, pp, &iph, iph.len, 0);
+	cp = pp->conn_in_get(af, skb, &iph, iph.len, 0);
 
 	if (unlikely(!cp)) {
 		int v;
 
-		if (!pp->conn_schedule(af, skb, pp, &v, &cp))
+		if (!pp->conn_schedule(af, skb, pd, &v, &cp))
 			return v;
 	}
 
@@ -1505,12 +1586,13 @@
 	}
 
 	IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
-
+	net = skb_net(skb);
+	ipvs = net_ipvs(net);
 	/* Check the server status */
 	if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
 		/* the destination server is not available */
 
-		if (sysctl_ip_vs_expire_nodest_conn) {
+		if (ipvs->sysctl_expire_nodest_conn) {
 			/* try to expire the connection immediately */
 			ip_vs_conn_expire_now(cp);
 		}
@@ -1521,7 +1603,7 @@
 	}
 
 	ip_vs_in_stats(cp, skb);
-	restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pp);
+	restart = ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd);
 	if (cp->packet_xmit)
 		ret = cp->packet_xmit(skb, cp, pp);
 		/* do not touch skb anymore */
@@ -1535,35 +1617,41 @@
 	 *
 	 * Sync connection if it is about to close to
 	 * encourage the standby servers to update the connections timeout
+	 *
+	 * For ONE_PKT let ip_vs_sync_conn() do the filter work.
 	 */
-	pkts = atomic_add_return(1, &cp->in_pkts);
-	if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
+
+	if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+		pkts = ipvs->sysctl_sync_threshold[0];
+	else
+		pkts = atomic_add_return(1, &cp->in_pkts);
+
+	if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
 	    cp->protocol == IPPROTO_SCTP) {
 		if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
-			(pkts % sysctl_ip_vs_sync_threshold[1]
-			 == sysctl_ip_vs_sync_threshold[0])) ||
+			(pkts % ipvs->sysctl_sync_threshold[1]
+			 == ipvs->sysctl_sync_threshold[0])) ||
 				(cp->old_state != cp->state &&
 				 ((cp->state == IP_VS_SCTP_S_CLOSED) ||
 				  (cp->state == IP_VS_SCTP_S_SHUT_ACK_CLI) ||
 				  (cp->state == IP_VS_SCTP_S_SHUT_ACK_SER)))) {
-			ip_vs_sync_conn(cp);
+			ip_vs_sync_conn(net, cp);
 			goto out;
 		}
 	}
 
 	/* Keep this block last: TCP and others with pp->num_states <= 1 */
-	else if (af == AF_INET &&
-	    (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
+	else if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
 	    (((cp->protocol != IPPROTO_TCP ||
 	       cp->state == IP_VS_TCP_S_ESTABLISHED) &&
-	      (pkts % sysctl_ip_vs_sync_threshold[1]
-	       == sysctl_ip_vs_sync_threshold[0])) ||
+	      (pkts % ipvs->sysctl_sync_threshold[1]
+	       == ipvs->sysctl_sync_threshold[0])) ||
 	     ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
 	      ((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
 	       (cp->state == IP_VS_TCP_S_CLOSE) ||
 	       (cp->state == IP_VS_TCP_S_CLOSE_WAIT) ||
 	       (cp->state == IP_VS_TCP_S_TIME_WAIT)))))
-		ip_vs_sync_conn(cp);
+		ip_vs_sync_conn(net, cp);
 out:
 	cp->old_state = cp->state;
 
@@ -1782,7 +1870,39 @@
 	},
 #endif
 };
+/*
+ *	Initialize IP Virtual Server per-netns memory.
+ */
+static int __net_init __ip_vs_init(struct net *net)
+{
+	struct netns_ipvs *ipvs;
 
+	ipvs = net_generic(net, ip_vs_net_id);
+	if (ipvs == NULL) {
+		pr_err("%s(): no memory.\n", __func__);
+		return -ENOMEM;
+	}
+	ipvs->net = net;
+	/* Counters used for creating unique names */
+	ipvs->gen = atomic_read(&ipvs_netns_cnt);
+	atomic_inc(&ipvs_netns_cnt);
+	net->ipvs = ipvs;
+	printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
+			 sizeof(struct netns_ipvs), ipvs->gen);
+	return 0;
+}
+
+static void __net_exit __ip_vs_cleanup(struct net *net)
+{
+	IP_VS_DBG(10, "ipvs netns %d released\n", net_ipvs(net)->gen);
+}
+
+static struct pernet_operations ipvs_core_ops = {
+	.init = __ip_vs_init,
+	.exit = __ip_vs_cleanup,
+	.id   = &ip_vs_net_id,
+	.size = sizeof(struct netns_ipvs),
+};
 
 /*
  *	Initialize IP Virtual Server
@@ -1791,8 +1911,11 @@
 {
 	int ret;
 
-	ip_vs_estimator_init();
+	ret = register_pernet_subsys(&ipvs_core_ops);	/* Alloc ip_vs struct */
+	if (ret < 0)
+		return ret;
 
+	ip_vs_estimator_init();
 	ret = ip_vs_control_init();
 	if (ret < 0) {
 		pr_err("can't setup control.\n");
@@ -1813,15 +1936,23 @@
 		goto cleanup_app;
 	}
 
+	ret = ip_vs_sync_init();
+	if (ret < 0) {
+		pr_err("can't setup sync data.\n");
+		goto cleanup_conn;
+	}
+
 	ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
 	if (ret < 0) {
 		pr_err("can't register hooks.\n");
-		goto cleanup_conn;
+		goto cleanup_sync;
 	}
 
 	pr_info("ipvs loaded.\n");
 	return ret;
 
+cleanup_sync:
+	ip_vs_sync_cleanup();
   cleanup_conn:
 	ip_vs_conn_cleanup();
   cleanup_app:
@@ -1831,17 +1962,20 @@
 	ip_vs_control_cleanup();
   cleanup_estimator:
 	ip_vs_estimator_cleanup();
+	unregister_pernet_subsys(&ipvs_core_ops);	/* free ip_vs struct */
 	return ret;
 }
 
 static void __exit ip_vs_cleanup(void)
 {
 	nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+	ip_vs_sync_cleanup();
 	ip_vs_conn_cleanup();
 	ip_vs_app_cleanup();
 	ip_vs_protocol_cleanup();
 	ip_vs_control_cleanup();
 	ip_vs_estimator_cleanup();
+	unregister_pernet_subsys(&ipvs_core_ops);	/* free ip_vs struct */
 	pr_info("ipvs unloaded.\n");
 }
 
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 22f7ad5..c73b0c8 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -38,6 +38,7 @@
 #include <linux/mutex.h>
 
 #include <net/net_namespace.h>
+#include <linux/nsproxy.h>
 #include <net/ip.h>
 #ifdef CONFIG_IP_VS_IPV6
 #include <net/ipv6.h>
@@ -57,42 +58,7 @@
 /* lock for service table */
 static DEFINE_RWLOCK(__ip_vs_svc_lock);
 
-/* lock for table with the real services */
-static DEFINE_RWLOCK(__ip_vs_rs_lock);
-
-/* lock for state and timeout tables */
-static DEFINE_SPINLOCK(ip_vs_securetcp_lock);
-
-/* lock for drop entry handling */
-static DEFINE_SPINLOCK(__ip_vs_dropentry_lock);
-
-/* lock for drop packet handling */
-static DEFINE_SPINLOCK(__ip_vs_droppacket_lock);
-
-/* 1/rate drop and drop-entry variables */
-int ip_vs_drop_rate = 0;
-int ip_vs_drop_counter = 0;
-static atomic_t ip_vs_dropentry = ATOMIC_INIT(0);
-
-/* number of virtual services */
-static int ip_vs_num_services = 0;
-
 /* sysctl variables */
-static int sysctl_ip_vs_drop_entry = 0;
-static int sysctl_ip_vs_drop_packet = 0;
-static int sysctl_ip_vs_secure_tcp = 0;
-static int sysctl_ip_vs_amemthresh = 1024;
-static int sysctl_ip_vs_am_droprate = 10;
-int sysctl_ip_vs_cache_bypass = 0;
-int sysctl_ip_vs_expire_nodest_conn = 0;
-int sysctl_ip_vs_expire_quiescent_template = 0;
-int sysctl_ip_vs_sync_threshold[2] = { 3, 50 };
-int sysctl_ip_vs_nat_icmp_send = 0;
-#ifdef CONFIG_IP_VS_NFCT
-int sysctl_ip_vs_conntrack;
-#endif
-int sysctl_ip_vs_snat_reroute = 1;
-
 
 #ifdef CONFIG_IP_VS_DEBUG
 static int sysctl_ip_vs_debug_level = 0;
@@ -105,7 +71,8 @@
 
 #ifdef CONFIG_IP_VS_IPV6
 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
-static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
+static int __ip_vs_addr_is_local_v6(struct net *net,
+				    const struct in6_addr *addr)
 {
 	struct rt6_info *rt;
 	struct flowi fl = {
@@ -114,7 +81,7 @@
 		.fl6_src = { .s6_addr32 = {0, 0, 0, 0} },
 	};
 
-	rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
+	rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl);
 	if (rt && rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK))
 			return 1;
 
@@ -125,7 +92,7 @@
  *	update_defense_level is called from keventd and from sysctl,
  *	so it needs to protect itself from softirqs
  */
-static void update_defense_level(void)
+static void update_defense_level(struct netns_ipvs *ipvs)
 {
 	struct sysinfo i;
 	static int old_secure_tcp = 0;
@@ -141,73 +108,73 @@
 	/* si_swapinfo(&i); */
 	/* availmem = availmem - (i.totalswap - i.freeswap); */
 
-	nomem = (availmem < sysctl_ip_vs_amemthresh);
+	nomem = (availmem < ipvs->sysctl_amemthresh);
 
 	local_bh_disable();
 
 	/* drop_entry */
-	spin_lock(&__ip_vs_dropentry_lock);
-	switch (sysctl_ip_vs_drop_entry) {
+	spin_lock(&ipvs->dropentry_lock);
+	switch (ipvs->sysctl_drop_entry) {
 	case 0:
-		atomic_set(&ip_vs_dropentry, 0);
+		atomic_set(&ipvs->dropentry, 0);
 		break;
 	case 1:
 		if (nomem) {
-			atomic_set(&ip_vs_dropentry, 1);
-			sysctl_ip_vs_drop_entry = 2;
+			atomic_set(&ipvs->dropentry, 1);
+			ipvs->sysctl_drop_entry = 2;
 		} else {
-			atomic_set(&ip_vs_dropentry, 0);
+			atomic_set(&ipvs->dropentry, 0);
 		}
 		break;
 	case 2:
 		if (nomem) {
-			atomic_set(&ip_vs_dropentry, 1);
+			atomic_set(&ipvs->dropentry, 1);
 		} else {
-			atomic_set(&ip_vs_dropentry, 0);
-			sysctl_ip_vs_drop_entry = 1;
+			atomic_set(&ipvs->dropentry, 0);
+			ipvs->sysctl_drop_entry = 1;
 		};
 		break;
 	case 3:
-		atomic_set(&ip_vs_dropentry, 1);
+		atomic_set(&ipvs->dropentry, 1);
 		break;
 	}
-	spin_unlock(&__ip_vs_dropentry_lock);
+	spin_unlock(&ipvs->dropentry_lock);
 
 	/* drop_packet */
-	spin_lock(&__ip_vs_droppacket_lock);
-	switch (sysctl_ip_vs_drop_packet) {
+	spin_lock(&ipvs->droppacket_lock);
+	switch (ipvs->sysctl_drop_packet) {
 	case 0:
-		ip_vs_drop_rate = 0;
+		ipvs->drop_rate = 0;
 		break;
 	case 1:
 		if (nomem) {
-			ip_vs_drop_rate = ip_vs_drop_counter
-				= sysctl_ip_vs_amemthresh /
-				(sysctl_ip_vs_amemthresh-availmem);
-			sysctl_ip_vs_drop_packet = 2;
+			ipvs->drop_rate = ipvs->drop_counter
+				= ipvs->sysctl_amemthresh /
+				(ipvs->sysctl_amemthresh-availmem);
+			ipvs->sysctl_drop_packet = 2;
 		} else {
-			ip_vs_drop_rate = 0;
+			ipvs->drop_rate = 0;
 		}
 		break;
 	case 2:
 		if (nomem) {
-			ip_vs_drop_rate = ip_vs_drop_counter
-				= sysctl_ip_vs_amemthresh /
-				(sysctl_ip_vs_amemthresh-availmem);
+			ipvs->drop_rate = ipvs->drop_counter
+				= ipvs->sysctl_amemthresh /
+				(ipvs->sysctl_amemthresh-availmem);
 		} else {
-			ip_vs_drop_rate = 0;
-			sysctl_ip_vs_drop_packet = 1;
+			ipvs->drop_rate = 0;
+			ipvs->sysctl_drop_packet = 1;
 		}
 		break;
 	case 3:
-		ip_vs_drop_rate = sysctl_ip_vs_am_droprate;
+		ipvs->drop_rate = ipvs->sysctl_am_droprate;
 		break;
 	}
-	spin_unlock(&__ip_vs_droppacket_lock);
+	spin_unlock(&ipvs->droppacket_lock);
 
 	/* secure_tcp */
-	spin_lock(&ip_vs_securetcp_lock);
-	switch (sysctl_ip_vs_secure_tcp) {
+	spin_lock(&ipvs->securetcp_lock);
+	switch (ipvs->sysctl_secure_tcp) {
 	case 0:
 		if (old_secure_tcp >= 2)
 			to_change = 0;
@@ -216,7 +183,7 @@
 		if (nomem) {
 			if (old_secure_tcp < 2)
 				to_change = 1;
-			sysctl_ip_vs_secure_tcp = 2;
+			ipvs->sysctl_secure_tcp = 2;
 		} else {
 			if (old_secure_tcp >= 2)
 				to_change = 0;
@@ -229,7 +196,7 @@
 		} else {
 			if (old_secure_tcp >= 2)
 				to_change = 0;
-			sysctl_ip_vs_secure_tcp = 1;
+			ipvs->sysctl_secure_tcp = 1;
 		}
 		break;
 	case 3:
@@ -237,10 +204,11 @@
 			to_change = 1;
 		break;
 	}
-	old_secure_tcp = sysctl_ip_vs_secure_tcp;
+	old_secure_tcp = ipvs->sysctl_secure_tcp;
 	if (to_change >= 0)
-		ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1);
-	spin_unlock(&ip_vs_securetcp_lock);
+		ip_vs_protocol_timeout_change(ipvs,
+					      ipvs->sysctl_secure_tcp > 1);
+	spin_unlock(&ipvs->securetcp_lock);
 
 	local_bh_enable();
 }
@@ -250,16 +218,16 @@
  *	Timer for checking the defense
  */
 #define DEFENSE_TIMER_PERIOD	1*HZ
-static void defense_work_handler(struct work_struct *work);
-static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
 
 static void defense_work_handler(struct work_struct *work)
 {
-	update_defense_level();
-	if (atomic_read(&ip_vs_dropentry))
-		ip_vs_random_dropentry();
+	struct netns_ipvs *ipvs =
+		container_of(work, struct netns_ipvs, defense_work.work);
 
-	schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);
+	update_defense_level(ipvs);
+	if (atomic_read(&ipvs->dropentry))
+		ip_vs_random_dropentry(ipvs->net);
+	schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
 }
 
 int
@@ -287,33 +255,13 @@
 /* the service table hashed by fwmark */
 static struct list_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE];
 
-/*
- *	Hash table: for real service lookups
- */
-#define IP_VS_RTAB_BITS 4
-#define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
-#define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
-
-static struct list_head ip_vs_rtable[IP_VS_RTAB_SIZE];
-
-/*
- *	Trash for destinations
- */
-static LIST_HEAD(ip_vs_dest_trash);
-
-/*
- *	FTP & NULL virtual service counters
- */
-static atomic_t ip_vs_ftpsvc_counter = ATOMIC_INIT(0);
-static atomic_t ip_vs_nullsvc_counter = ATOMIC_INIT(0);
-
 
 /*
  *	Returns hash value for virtual service
  */
-static __inline__ unsigned
-ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
-		  __be16 port)
+static inline unsigned
+ip_vs_svc_hashkey(struct net *net, int af, unsigned proto,
+		  const union nf_inet_addr *addr, __be16 port)
 {
 	register unsigned porth = ntohs(port);
 	__be32 addr_fold = addr->ip;
@@ -323,6 +271,7 @@
 		addr_fold = addr->ip6[0]^addr->ip6[1]^
 			    addr->ip6[2]^addr->ip6[3];
 #endif
+	addr_fold ^= ((size_t)net>>8);
 
 	return (proto^ntohl(addr_fold)^(porth>>IP_VS_SVC_TAB_BITS)^porth)
 		& IP_VS_SVC_TAB_MASK;
@@ -331,13 +280,13 @@
 /*
  *	Returns hash value of fwmark for virtual service lookup
  */
-static __inline__ unsigned ip_vs_svc_fwm_hashkey(__u32 fwmark)
+static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
 {
-	return fwmark & IP_VS_SVC_TAB_MASK;
+	return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
 }
 
 /*
- *	Hashes a service in the ip_vs_svc_table by <proto,addr,port>
+ *	Hashes a service in the ip_vs_svc_table by <netns,proto,addr,port>
  *	or in the ip_vs_svc_fwm_table by fwmark.
  *	Should be called with locked tables.
  */
@@ -353,16 +302,16 @@
 
 	if (svc->fwmark == 0) {
 		/*
-		 *  Hash it by <protocol,addr,port> in ip_vs_svc_table
+		 *  Hash it by <netns,protocol,addr,port> in ip_vs_svc_table
 		 */
-		hash = ip_vs_svc_hashkey(svc->af, svc->protocol, &svc->addr,
-					 svc->port);
+		hash = ip_vs_svc_hashkey(svc->net, svc->af, svc->protocol,
+					 &svc->addr, svc->port);
 		list_add(&svc->s_list, &ip_vs_svc_table[hash]);
 	} else {
 		/*
-		 *  Hash it by fwmark in ip_vs_svc_fwm_table
+		 *  Hash it by fwmark in svc_fwm_table
 		 */
-		hash = ip_vs_svc_fwm_hashkey(svc->fwmark);
+		hash = ip_vs_svc_fwm_hashkey(svc->net, svc->fwmark);
 		list_add(&svc->f_list, &ip_vs_svc_fwm_table[hash]);
 	}
 
@@ -374,7 +323,7 @@
 
 
 /*
- *	Unhashes a service from ip_vs_svc_table/ip_vs_svc_fwm_table.
+ *	Unhashes a service from svc_table / svc_fwm_table.
  *	Should be called with locked tables.
  */
 static int ip_vs_svc_unhash(struct ip_vs_service *svc)
@@ -386,10 +335,10 @@
 	}
 
 	if (svc->fwmark == 0) {
-		/* Remove it from the ip_vs_svc_table table */
+		/* Remove it from the svc_table */
 		list_del(&svc->s_list);
 	} else {
-		/* Remove it from the ip_vs_svc_fwm_table table */
+		/* Remove it from the svc_fwm_table */
 		list_del(&svc->f_list);
 	}
 
@@ -400,23 +349,24 @@
 
 
 /*
- *	Get service by {proto,addr,port} in the service table.
+ *	Get service by {netns,proto,addr,port} in the service table.
  */
 static inline struct ip_vs_service *
-__ip_vs_service_find(int af, __u16 protocol, const union nf_inet_addr *vaddr,
-		    __be16 vport)
+__ip_vs_service_find(struct net *net, int af, __u16 protocol,
+		     const union nf_inet_addr *vaddr, __be16 vport)
 {
 	unsigned hash;
 	struct ip_vs_service *svc;
 
 	/* Check for "full" addressed entries */
-	hash = ip_vs_svc_hashkey(af, protocol, vaddr, vport);
+	hash = ip_vs_svc_hashkey(net, af, protocol, vaddr, vport);
 
 	list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){
 		if ((svc->af == af)
 		    && ip_vs_addr_equal(af, &svc->addr, vaddr)
 		    && (svc->port == vport)
-		    && (svc->protocol == protocol)) {
+		    && (svc->protocol == protocol)
+		    && net_eq(svc->net, net)) {
 			/* HIT */
 			return svc;
 		}
@@ -430,16 +380,17 @@
  *	Get service by {fwmark} in the service table.
  */
 static inline struct ip_vs_service *
-__ip_vs_svc_fwm_find(int af, __u32 fwmark)
+__ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
 {
 	unsigned hash;
 	struct ip_vs_service *svc;
 
 	/* Check for fwmark addressed entries */
-	hash = ip_vs_svc_fwm_hashkey(fwmark);
+	hash = ip_vs_svc_fwm_hashkey(net, fwmark);
 
 	list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) {
-		if (svc->fwmark == fwmark && svc->af == af) {
+		if (svc->fwmark == fwmark && svc->af == af
+		    && net_eq(svc->net, net)) {
 			/* HIT */
 			return svc;
 		}
@@ -449,42 +400,44 @@
 }
 
 struct ip_vs_service *
-ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
+ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
 		  const union nf_inet_addr *vaddr, __be16 vport)
 {
 	struct ip_vs_service *svc;
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	read_lock(&__ip_vs_svc_lock);
 
 	/*
 	 *	Check the table hashed by fwmark first
 	 */
-	if (fwmark && (svc = __ip_vs_svc_fwm_find(af, fwmark)))
+	svc = __ip_vs_svc_fwm_find(net, af, fwmark);
+	if (fwmark && svc)
 		goto out;
 
 	/*
 	 *	Check the table hashed by <protocol,addr,port>
 	 *	for "full" addressed entries
 	 */
-	svc = __ip_vs_service_find(af, protocol, vaddr, vport);
+	svc = __ip_vs_service_find(net, af, protocol, vaddr, vport);
 
 	if (svc == NULL
 	    && protocol == IPPROTO_TCP
-	    && atomic_read(&ip_vs_ftpsvc_counter)
+	    && atomic_read(&ipvs->ftpsvc_counter)
 	    && (vport == FTPDATA || ntohs(vport) >= PROT_SOCK)) {
 		/*
 		 * Check if ftp service entry exists, the packet
 		 * might belong to FTP data connections.
 		 */
-		svc = __ip_vs_service_find(af, protocol, vaddr, FTPPORT);
+		svc = __ip_vs_service_find(net, af, protocol, vaddr, FTPPORT);
 	}
 
 	if (svc == NULL
-	    && atomic_read(&ip_vs_nullsvc_counter)) {
+	    && atomic_read(&ipvs->nullsvc_counter)) {
 		/*
 		 * Check if the catch-all port (port zero) exists
 		 */
-		svc = __ip_vs_service_find(af, protocol, vaddr, 0);
+		svc = __ip_vs_service_find(net, af, protocol, vaddr, 0);
 	}
 
   out:
@@ -519,6 +472,7 @@
 			      svc->fwmark,
 			      IP_VS_DBG_ADDR(svc->af, &svc->addr),
 			      ntohs(svc->port), atomic_read(&svc->usecnt));
+		free_percpu(svc->stats.cpustats);
 		kfree(svc);
 	}
 }
@@ -545,10 +499,10 @@
 }
 
 /*
- *	Hashes ip_vs_dest in ip_vs_rtable by <proto,addr,port>.
+ *	Hashes ip_vs_dest in rs_table by <proto,addr,port>.
  *	should be called with locked tables.
  */
-static int ip_vs_rs_hash(struct ip_vs_dest *dest)
+static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
 {
 	unsigned hash;
 
@@ -562,19 +516,19 @@
 	 */
 	hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port);
 
-	list_add(&dest->d_list, &ip_vs_rtable[hash]);
+	list_add(&dest->d_list, &ipvs->rs_table[hash]);
 
 	return 1;
 }
 
 /*
- *	UNhashes ip_vs_dest from ip_vs_rtable.
+ *	UNhashes ip_vs_dest from rs_table.
  *	should be called with locked tables.
  */
 static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
 {
 	/*
-	 * Remove it from the ip_vs_rtable table.
+	 * Remove it from the rs_table.
 	 */
 	if (!list_empty(&dest->d_list)) {
 		list_del(&dest->d_list);
@@ -588,10 +542,11 @@
  *	Lookup real service by <proto,addr,port> in the real service table.
  */
 struct ip_vs_dest *
-ip_vs_lookup_real_service(int af, __u16 protocol,
+ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
 			  const union nf_inet_addr *daddr,
 			  __be16 dport)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
 	unsigned hash;
 	struct ip_vs_dest *dest;
 
@@ -601,19 +556,19 @@
 	 */
 	hash = ip_vs_rs_hashkey(af, daddr, dport);
 
-	read_lock(&__ip_vs_rs_lock);
-	list_for_each_entry(dest, &ip_vs_rtable[hash], d_list) {
+	read_lock(&ipvs->rs_lock);
+	list_for_each_entry(dest, &ipvs->rs_table[hash], d_list) {
 		if ((dest->af == af)
 		    && ip_vs_addr_equal(af, &dest->addr, daddr)
 		    && (dest->port == dport)
 		    && ((dest->protocol == protocol) ||
 			dest->vfwmark)) {
 			/* HIT */
-			read_unlock(&__ip_vs_rs_lock);
+			read_unlock(&ipvs->rs_lock);
 			return dest;
 		}
 	}
-	read_unlock(&__ip_vs_rs_lock);
+	read_unlock(&ipvs->rs_lock);
 
 	return NULL;
 }
@@ -652,15 +607,16 @@
  * ip_vs_lookup_real_service() looked promising, but
  * does not seem to work as expected.
  */
-struct ip_vs_dest *ip_vs_find_dest(int af, const union nf_inet_addr *daddr,
+struct ip_vs_dest *ip_vs_find_dest(struct net  *net, int af,
+				   const union nf_inet_addr *daddr,
 				   __be16 dport,
 				   const union nf_inet_addr *vaddr,
-				   __be16 vport, __u16 protocol)
+				   __be16 vport, __u16 protocol, __u32 fwmark)
 {
 	struct ip_vs_dest *dest;
 	struct ip_vs_service *svc;
 
-	svc = ip_vs_service_get(af, 0, protocol, vaddr, vport);
+	svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport);
 	if (!svc)
 		return NULL;
 	dest = ip_vs_lookup_dest(svc, daddr, dport);
@@ -685,11 +641,12 @@
 		     __be16 dport)
 {
 	struct ip_vs_dest *dest, *nxt;
+	struct netns_ipvs *ipvs = net_ipvs(svc->net);
 
 	/*
 	 * Find the destination in trash
 	 */
-	list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
+	list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) {
 		IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, "
 			      "dest->refcnt=%d\n",
 			      dest->vfwmark,
@@ -720,6 +677,7 @@
 			list_del(&dest->n_list);
 			ip_vs_dst_reset(dest);
 			__ip_vs_unbind_svc(dest);
+			free_percpu(dest->stats.cpustats);
 			kfree(dest);
 		}
 	}
@@ -737,14 +695,16 @@
  *  are expired, and the refcnt of each destination in the trash must
  *  be 1, so we simply release them here.
  */
-static void ip_vs_trash_cleanup(void)
+static void ip_vs_trash_cleanup(struct net *net)
 {
 	struct ip_vs_dest *dest, *nxt;
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
-	list_for_each_entry_safe(dest, nxt, &ip_vs_dest_trash, n_list) {
+	list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) {
 		list_del(&dest->n_list);
 		ip_vs_dst_reset(dest);
 		__ip_vs_unbind_svc(dest);
+		free_percpu(dest->stats.cpustats);
 		kfree(dest);
 	}
 }
@@ -768,6 +728,7 @@
 __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
 		    struct ip_vs_dest_user_kern *udest, int add)
 {
+	struct netns_ipvs *ipvs = net_ipvs(svc->net);
 	int conn_flags;
 
 	/* set the weight and the flags */
@@ -780,12 +741,12 @@
 		conn_flags |= IP_VS_CONN_F_NOOUTPUT;
 	} else {
 		/*
-		 *    Put the real service in ip_vs_rtable if not present.
+		 *    Put the real service in rs_table if not present.
 		 *    For now only for NAT!
 		 */
-		write_lock_bh(&__ip_vs_rs_lock);
-		ip_vs_rs_hash(dest);
-		write_unlock_bh(&__ip_vs_rs_lock);
+		write_lock_bh(&ipvs->rs_lock);
+		ip_vs_rs_hash(ipvs, dest);
+		write_unlock_bh(&ipvs->rs_lock);
 	}
 	atomic_set(&dest->conn_flags, conn_flags);
 
@@ -813,7 +774,7 @@
 	spin_unlock(&dest->dst_lock);
 
 	if (add)
-		ip_vs_new_estimator(&dest->stats);
+		ip_vs_new_estimator(svc->net, &dest->stats);
 
 	write_lock_bh(&__ip_vs_svc_lock);
 
@@ -850,12 +811,12 @@
 		atype = ipv6_addr_type(&udest->addr.in6);
 		if ((!(atype & IPV6_ADDR_UNICAST) ||
 			atype & IPV6_ADDR_LINKLOCAL) &&
-			!__ip_vs_addr_is_local_v6(&udest->addr.in6))
+			!__ip_vs_addr_is_local_v6(svc->net, &udest->addr.in6))
 			return -EINVAL;
 	} else
 #endif
 	{
-		atype = inet_addr_type(&init_net, udest->addr.ip);
+		atype = inet_addr_type(svc->net, udest->addr.ip);
 		if (atype != RTN_LOCAL && atype != RTN_UNICAST)
 			return -EINVAL;
 	}
@@ -865,6 +826,11 @@
 		pr_err("%s(): no memory.\n", __func__);
 		return -ENOMEM;
 	}
+	dest->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+	if (!dest->stats.cpustats) {
+		pr_err("%s() alloc_percpu failed\n", __func__);
+		goto err_alloc;
+	}
 
 	dest->af = svc->af;
 	dest->protocol = svc->protocol;
@@ -888,6 +854,10 @@
 
 	LeaveFunction(2);
 	return 0;
+
+err_alloc:
+	kfree(dest);
+	return -ENOMEM;
 }
 
 
@@ -1006,16 +976,18 @@
 /*
  *	Delete a destination (must be already unlinked from the service)
  */
-static void __ip_vs_del_dest(struct ip_vs_dest *dest)
+static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest)
 {
-	ip_vs_kill_estimator(&dest->stats);
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	ip_vs_kill_estimator(net, &dest->stats);
 
 	/*
 	 *  Remove it from the d-linked list with the real services.
 	 */
-	write_lock_bh(&__ip_vs_rs_lock);
+	write_lock_bh(&ipvs->rs_lock);
 	ip_vs_rs_unhash(dest);
-	write_unlock_bh(&__ip_vs_rs_lock);
+	write_unlock_bh(&ipvs->rs_lock);
 
 	/*
 	 *  Decrease the refcnt of the dest, and free the dest
@@ -1034,6 +1006,7 @@
 		   and only one user context can update virtual service at a
 		   time, so the operation here is OK */
 		atomic_dec(&dest->svc->refcnt);
+		free_percpu(dest->stats.cpustats);
 		kfree(dest);
 	} else {
 		IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, "
@@ -1041,7 +1014,7 @@
 			      IP_VS_DBG_ADDR(dest->af, &dest->addr),
 			      ntohs(dest->port),
 			      atomic_read(&dest->refcnt));
-		list_add(&dest->n_list, &ip_vs_dest_trash);
+		list_add(&dest->n_list, &ipvs->dest_trash);
 		atomic_inc(&dest->refcnt);
 	}
 }
@@ -1105,7 +1078,7 @@
 	/*
 	 *	Delete the destination
 	 */
-	__ip_vs_del_dest(dest);
+	__ip_vs_del_dest(svc->net, dest);
 
 	LeaveFunction(2);
 
@@ -1117,13 +1090,14 @@
  *	Add a service into the service hash table
  */
 static int
-ip_vs_add_service(struct ip_vs_service_user_kern *u,
+ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 		  struct ip_vs_service **svc_p)
 {
 	int ret = 0;
 	struct ip_vs_scheduler *sched = NULL;
 	struct ip_vs_pe *pe = NULL;
 	struct ip_vs_service *svc = NULL;
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	/* increase the module use count */
 	ip_vs_use_count_inc();
@@ -1137,7 +1111,7 @@
 	}
 
 	if (u->pe_name && *u->pe_name) {
-		pe = ip_vs_pe_get(u->pe_name);
+		pe = ip_vs_pe_getbyname(u->pe_name);
 		if (pe == NULL) {
 			pr_info("persistence engine module ip_vs_pe_%s "
 				"not found\n", u->pe_name);
@@ -1159,6 +1133,11 @@
 		ret = -ENOMEM;
 		goto out_err;
 	}
+	svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+	if (!svc->stats.cpustats) {
+		pr_err("%s() alloc_percpu failed\n", __func__);
+		goto out_err;
+	}
 
 	/* I'm the first user of the service */
 	atomic_set(&svc->usecnt, 0);
@@ -1172,6 +1151,7 @@
 	svc->flags = u->flags;
 	svc->timeout = u->timeout * HZ;
 	svc->netmask = u->netmask;
+	svc->net = net;
 
 	INIT_LIST_HEAD(&svc->destinations);
 	rwlock_init(&svc->sched_lock);
@@ -1189,15 +1169,15 @@
 
 	/* Update the virtual service counters */
 	if (svc->port == FTPPORT)
-		atomic_inc(&ip_vs_ftpsvc_counter);
+		atomic_inc(&ipvs->ftpsvc_counter);
 	else if (svc->port == 0)
-		atomic_inc(&ip_vs_nullsvc_counter);
+		atomic_inc(&ipvs->nullsvc_counter);
 
-	ip_vs_new_estimator(&svc->stats);
+	ip_vs_new_estimator(net, &svc->stats);
 
 	/* Count only IPv4 services for old get/setsockopt interface */
 	if (svc->af == AF_INET)
-		ip_vs_num_services++;
+		ipvs->num_services++;
 
 	/* Hash the service into the service table */
 	write_lock_bh(&__ip_vs_svc_lock);
@@ -1207,6 +1187,7 @@
 	*svc_p = svc;
 	return 0;
 
+
  out_err:
 	if (svc != NULL) {
 		ip_vs_unbind_scheduler(svc);
@@ -1215,6 +1196,8 @@
 			ip_vs_app_inc_put(svc->inc);
 			local_bh_enable();
 		}
+		if (svc->stats.cpustats)
+			free_percpu(svc->stats.cpustats);
 		kfree(svc);
 	}
 	ip_vs_scheduler_put(sched);
@@ -1248,7 +1231,7 @@
 	old_sched = sched;
 
 	if (u->pe_name && *u->pe_name) {
-		pe = ip_vs_pe_get(u->pe_name);
+		pe = ip_vs_pe_getbyname(u->pe_name);
 		if (pe == NULL) {
 			pr_info("persistence engine module ip_vs_pe_%s "
 				"not found\n", u->pe_name);
@@ -1334,14 +1317,15 @@
 	struct ip_vs_dest *dest, *nxt;
 	struct ip_vs_scheduler *old_sched;
 	struct ip_vs_pe *old_pe;
+	struct netns_ipvs *ipvs = net_ipvs(svc->net);
 
 	pr_info("%s: enter\n", __func__);
 
 	/* Count only IPv4 services for old get/setsockopt interface */
 	if (svc->af == AF_INET)
-		ip_vs_num_services--;
+		ipvs->num_services--;
 
-	ip_vs_kill_estimator(&svc->stats);
+	ip_vs_kill_estimator(svc->net, &svc->stats);
 
 	/* Unbind scheduler */
 	old_sched = svc->scheduler;
@@ -1364,16 +1348,16 @@
 	 */
 	list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
 		__ip_vs_unlink_dest(svc, dest, 0);
-		__ip_vs_del_dest(dest);
+		__ip_vs_del_dest(svc->net, dest);
 	}
 
 	/*
 	 *    Update the virtual service counters
 	 */
 	if (svc->port == FTPPORT)
-		atomic_dec(&ip_vs_ftpsvc_counter);
+		atomic_dec(&ipvs->ftpsvc_counter);
 	else if (svc->port == 0)
-		atomic_dec(&ip_vs_nullsvc_counter);
+		atomic_dec(&ipvs->nullsvc_counter);
 
 	/*
 	 *    Free the service if nobody refers to it
@@ -1383,6 +1367,7 @@
 			      svc->fwmark,
 			      IP_VS_DBG_ADDR(svc->af, &svc->addr),
 			      ntohs(svc->port), atomic_read(&svc->usecnt));
+		free_percpu(svc->stats.cpustats);
 		kfree(svc);
 	}
 
@@ -1428,17 +1413,19 @@
 /*
  *	Flush all the virtual services
  */
-static int ip_vs_flush(void)
+static int ip_vs_flush(struct net *net)
 {
 	int idx;
 	struct ip_vs_service *svc, *nxt;
 
 	/*
-	 * Flush the service table hashed by <protocol,addr,port>
+	 * Flush the service table hashed by <netns,protocol,addr,port>
 	 */
 	for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
-		list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx], s_list) {
-			ip_vs_unlink_service(svc);
+		list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx],
+					 s_list) {
+			if (net_eq(svc->net, net))
+				ip_vs_unlink_service(svc);
 		}
 	}
 
@@ -1448,7 +1435,8 @@
 	for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		list_for_each_entry_safe(svc, nxt,
 					 &ip_vs_svc_fwm_table[idx], f_list) {
-			ip_vs_unlink_service(svc);
+			if (net_eq(svc->net, net))
+				ip_vs_unlink_service(svc);
 		}
 	}
 
@@ -1472,24 +1460,26 @@
 	return 0;
 }
 
-static int ip_vs_zero_all(void)
+static int ip_vs_zero_all(struct net *net)
 {
 	int idx;
 	struct ip_vs_service *svc;
 
 	for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
-			ip_vs_zero_service(svc);
+			if (net_eq(svc->net, net))
+				ip_vs_zero_service(svc);
 		}
 	}
 
 	for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
-			ip_vs_zero_service(svc);
+			if (net_eq(svc->net, net))
+				ip_vs_zero_service(svc);
 		}
 	}
 
-	ip_vs_zero_stats(&ip_vs_stats);
+	ip_vs_zero_stats(net_ipvs(net)->tot_stats);
 	return 0;
 }
 
@@ -1498,6 +1488,7 @@
 proc_do_defense_mode(ctl_table *table, int write,
 		     void __user *buffer, size_t *lenp, loff_t *ppos)
 {
+	struct net *net = current->nsproxy->net_ns;
 	int *valp = table->data;
 	int val = *valp;
 	int rc;
@@ -1508,7 +1499,7 @@
 			/* Restore the correct value */
 			*valp = val;
 		} else {
-			update_defense_level();
+			update_defense_level(net_ipvs(net));
 		}
 	}
 	return rc;
@@ -1534,15 +1525,111 @@
 	return rc;
 }
 
+static int
+proc_do_sync_mode(ctl_table *table, int write,
+		     void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int *valp = table->data;
+	int val = *valp;
+	int rc;
+
+	rc = proc_dointvec(table, write, buffer, lenp, ppos);
+	if (write && (*valp != val)) {
+		if ((*valp < 0) || (*valp > 1)) {
+			/* Restore the correct value */
+			*valp = val;
+		} else {
+			struct net *net = current->nsproxy->net_ns;
+			ip_vs_sync_switch_mode(net, val);
+		}
+	}
+	return rc;
+}
 
 /*
  *	IPVS sysctl table (under the /proc/sys/net/ipv4/vs/)
+ *	Do not change the order or insert new entries without
+ *	aligning them with the netns init in __ip_vs_control_init()
  */
 
 static struct ctl_table vs_vars[] = {
 	{
 		.procname	= "amemthresh",
-		.data		= &sysctl_ip_vs_amemthresh,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "am_droprate",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "drop_entry",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_do_defense_mode,
+	},
+	{
+		.procname	= "drop_packet",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_do_defense_mode,
+	},
+#ifdef CONFIG_IP_VS_NFCT
+	{
+		.procname	= "conntrack",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+#endif
+	{
+		.procname	= "secure_tcp",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_do_defense_mode,
+	},
+	{
+		.procname	= "snat_reroute",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+	{
+		.procname	= "sync_version",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_do_sync_mode,
+	},
+	{
+		.procname	= "cache_bypass",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "expire_nodest_conn",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "expire_quiescent_template",
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
+		.procname	= "sync_threshold",
+		.maxlen		=
+			sizeof(((struct netns_ipvs *)0)->sysctl_sync_threshold),
+		.mode		= 0644,
+		.proc_handler	= proc_do_sync_threshold,
+	},
+	{
+		.procname	= "nat_icmp_send",
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
@@ -1556,50 +1643,6 @@
 		.proc_handler	= proc_dointvec,
 	},
 #endif
-	{
-		.procname	= "am_droprate",
-		.data		= &sysctl_ip_vs_am_droprate,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "drop_entry",
-		.data		= &sysctl_ip_vs_drop_entry,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_do_defense_mode,
-	},
-	{
-		.procname	= "drop_packet",
-		.data		= &sysctl_ip_vs_drop_packet,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_do_defense_mode,
-	},
-#ifdef CONFIG_IP_VS_NFCT
-	{
-		.procname	= "conntrack",
-		.data		= &sysctl_ip_vs_conntrack,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
-#endif
-	{
-		.procname	= "secure_tcp",
-		.data		= &sysctl_ip_vs_secure_tcp,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_do_defense_mode,
-	},
-	{
-		.procname	= "snat_reroute",
-		.data		= &sysctl_ip_vs_snat_reroute,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
-	},
 #if 0
 	{
 		.procname	= "timeout_established",
@@ -1686,41 +1729,6 @@
 		.proc_handler	= proc_dointvec_jiffies,
 	},
 #endif
-	{
-		.procname	= "cache_bypass",
-		.data		= &sysctl_ip_vs_cache_bypass,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "expire_nodest_conn",
-		.data		= &sysctl_ip_vs_expire_nodest_conn,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "expire_quiescent_template",
-		.data		= &sysctl_ip_vs_expire_quiescent_template,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
-		.procname	= "sync_threshold",
-		.data		= &sysctl_ip_vs_sync_threshold,
-		.maxlen		= sizeof(sysctl_ip_vs_sync_threshold),
-		.mode		= 0644,
-		.proc_handler	= proc_do_sync_threshold,
-	},
-	{
-		.procname	= "nat_icmp_send",
-		.data		= &sysctl_ip_vs_nat_icmp_send,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
 	{ }
 };
 
@@ -1732,11 +1740,10 @@
 };
 EXPORT_SYMBOL_GPL(net_vs_ctl_path);
 
-static struct ctl_table_header * sysctl_header;
-
 #ifdef CONFIG_PROC_FS
 
 struct ip_vs_iter {
+	struct seq_net_private p;  /* Do not move this, netns depends upon it */
 	struct list_head *table;
 	int bucket;
 };
@@ -1763,6 +1770,7 @@
 /* Get the Nth entry in the two lists */
 static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
 {
+	struct net *net = seq_file_net(seq);
 	struct ip_vs_iter *iter = seq->private;
 	int idx;
 	struct ip_vs_service *svc;
@@ -1770,7 +1778,7 @@
 	/* look in hash by protocol */
 	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
-			if (pos-- == 0){
+			if (net_eq(svc->net, net) && pos-- == 0) {
 				iter->table = ip_vs_svc_table;
 				iter->bucket = idx;
 				return svc;
@@ -1781,7 +1789,7 @@
 	/* keep looking in fwmark */
 	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
-			if (pos-- == 0) {
+			if (net_eq(svc->net, net) && pos-- == 0) {
 				iter->table = ip_vs_svc_fwm_table;
 				iter->bucket = idx;
 				return svc;
@@ -1935,7 +1943,7 @@
 
 static int ip_vs_info_open(struct inode *inode, struct file *file)
 {
-	return seq_open_private(file, &ip_vs_info_seq_ops,
+	return seq_open_net(inode, file, &ip_vs_info_seq_ops,
 			sizeof(struct ip_vs_iter));
 }
 
@@ -1949,13 +1957,11 @@
 
 #endif
 
-struct ip_vs_stats ip_vs_stats = {
-	.lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock),
-};
-
 #ifdef CONFIG_PROC_FS
 static int ip_vs_stats_show(struct seq_file *seq, void *v)
 {
+	struct net *net = seq_file_single_net(seq);
+	struct ip_vs_stats *tot_stats = net_ipvs(net)->tot_stats;
 
 /*               01234567 01234567 01234567 0123456701234567 0123456701234567 */
 	seq_puts(seq,
@@ -1963,29 +1969,29 @@
 	seq_printf(seq,
 		   "   Conns  Packets  Packets            Bytes            Bytes\n");
 
-	spin_lock_bh(&ip_vs_stats.lock);
-	seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", ip_vs_stats.ustats.conns,
-		   ip_vs_stats.ustats.inpkts, ip_vs_stats.ustats.outpkts,
-		   (unsigned long long) ip_vs_stats.ustats.inbytes,
-		   (unsigned long long) ip_vs_stats.ustats.outbytes);
+	spin_lock_bh(&tot_stats->lock);
+	seq_printf(seq, "%8X %8X %8X %16LX %16LX\n\n", tot_stats->ustats.conns,
+		   tot_stats->ustats.inpkts, tot_stats->ustats.outpkts,
+		   (unsigned long long) tot_stats->ustats.inbytes,
+		   (unsigned long long) tot_stats->ustats.outbytes);
 
 /*                 01234567 01234567 01234567 0123456701234567 0123456701234567 */
 	seq_puts(seq,
 		   " Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s\n");
 	seq_printf(seq,"%8X %8X %8X %16X %16X\n",
-			ip_vs_stats.ustats.cps,
-			ip_vs_stats.ustats.inpps,
-			ip_vs_stats.ustats.outpps,
-			ip_vs_stats.ustats.inbps,
-			ip_vs_stats.ustats.outbps);
-	spin_unlock_bh(&ip_vs_stats.lock);
+			tot_stats->ustats.cps,
+			tot_stats->ustats.inpps,
+			tot_stats->ustats.outpps,
+			tot_stats->ustats.inbps,
+			tot_stats->ustats.outbps);
+	spin_unlock_bh(&tot_stats->lock);
 
 	return 0;
 }
 
 static int ip_vs_stats_seq_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, ip_vs_stats_show, NULL);
+	return single_open_net(inode, file, ip_vs_stats_show);
 }
 
 static const struct file_operations ip_vs_stats_fops = {
@@ -1996,13 +2002,70 @@
 	.release = single_release,
 };
 
+static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
+{
+	struct net *net = seq_file_single_net(seq);
+	struct ip_vs_stats *tot_stats = net_ipvs(net)->tot_stats;
+	int i;
+
+/*               01234567 01234567 01234567 0123456701234567 0123456701234567 */
+	seq_puts(seq,
+		 "       Total Incoming Outgoing         Incoming         Outgoing\n");
+	seq_printf(seq,
+		   "CPU    Conns  Packets  Packets            Bytes            Bytes\n");
+
+	for_each_possible_cpu(i) {
+		struct ip_vs_cpu_stats *u = per_cpu_ptr(net->ipvs->cpustats, i);
+		seq_printf(seq, "%3X %8X %8X %8X %16LX %16LX\n",
+			    i, u->ustats.conns, u->ustats.inpkts,
+			    u->ustats.outpkts, (__u64)u->ustats.inbytes,
+			    (__u64)u->ustats.outbytes);
+	}
+
+	spin_lock_bh(&tot_stats->lock);
+	seq_printf(seq, "  ~ %8X %8X %8X %16LX %16LX\n\n",
+		   tot_stats->ustats.conns, tot_stats->ustats.inpkts,
+		   tot_stats->ustats.outpkts,
+		   (unsigned long long) tot_stats->ustats.inbytes,
+		   (unsigned long long) tot_stats->ustats.outbytes);
+
+/*                 01234567 01234567 01234567 0123456701234567 0123456701234567 */
+	seq_puts(seq,
+		   "     Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s\n");
+	seq_printf(seq, "    %8X %8X %8X %16X %16X\n",
+			tot_stats->ustats.cps,
+			tot_stats->ustats.inpps,
+			tot_stats->ustats.outpps,
+			tot_stats->ustats.inbps,
+			tot_stats->ustats.outbps);
+	spin_unlock_bh(&tot_stats->lock);
+
+	return 0;
+}
+
+static int ip_vs_stats_percpu_seq_open(struct inode *inode, struct file *file)
+{
+	return single_open_net(inode, file, ip_vs_stats_percpu_show);
+}
+
+static const struct file_operations ip_vs_stats_percpu_fops = {
+	.owner = THIS_MODULE,
+	.open = ip_vs_stats_percpu_seq_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
 #endif
 
 /*
  *	Set timeout values for tcp tcpfin udp in the timeout_table.
  */
-static int ip_vs_set_timeout(struct ip_vs_timeout_user *u)
+static int ip_vs_set_timeout(struct net *net, struct ip_vs_timeout_user *u)
 {
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
+	struct ip_vs_proto_data *pd;
+#endif
+
 	IP_VS_DBG(2, "Setting timeout tcp:%d tcpfin:%d udp:%d\n",
 		  u->tcp_timeout,
 		  u->tcp_fin_timeout,
@@ -2010,19 +2073,22 @@
 
 #ifdef CONFIG_IP_VS_PROTO_TCP
 	if (u->tcp_timeout) {
-		ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED]
+		pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+		pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
 			= u->tcp_timeout * HZ;
 	}
 
 	if (u->tcp_fin_timeout) {
-		ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT]
+		pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+		pd->timeout_table[IP_VS_TCP_S_FIN_WAIT]
 			= u->tcp_fin_timeout * HZ;
 	}
 #endif
 
 #ifdef CONFIG_IP_VS_PROTO_UDP
 	if (u->udp_timeout) {
-		ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL]
+		pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+		pd->timeout_table[IP_VS_UDP_S_NORMAL]
 			= u->udp_timeout * HZ;
 	}
 #endif
@@ -2087,6 +2153,7 @@
 static int
 do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 {
+	struct net *net = sock_net(sk);
 	int ret;
 	unsigned char arg[MAX_ARG_LEN];
 	struct ip_vs_service_user *usvc_compat;
@@ -2121,19 +2188,20 @@
 
 	if (cmd == IP_VS_SO_SET_FLUSH) {
 		/* Flush the virtual service */
-		ret = ip_vs_flush();
+		ret = ip_vs_flush(net);
 		goto out_unlock;
 	} else if (cmd == IP_VS_SO_SET_TIMEOUT) {
 		/* Set timeout values for (tcp tcpfin udp) */
-		ret = ip_vs_set_timeout((struct ip_vs_timeout_user *)arg);
+		ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg);
 		goto out_unlock;
 	} else if (cmd == IP_VS_SO_SET_STARTDAEMON) {
 		struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
-		ret = start_sync_thread(dm->state, dm->mcast_ifn, dm->syncid);
+		ret = start_sync_thread(net, dm->state, dm->mcast_ifn,
+					dm->syncid);
 		goto out_unlock;
 	} else if (cmd == IP_VS_SO_SET_STOPDAEMON) {
 		struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg;
-		ret = stop_sync_thread(dm->state);
+		ret = stop_sync_thread(net, dm->state);
 		goto out_unlock;
 	}
 
@@ -2148,7 +2216,7 @@
 	if (cmd == IP_VS_SO_SET_ZERO) {
 		/* if no service address is set, zero counters in all */
 		if (!usvc.fwmark && !usvc.addr.ip && !usvc.port) {
-			ret = ip_vs_zero_all();
+			ret = ip_vs_zero_all(net);
 			goto out_unlock;
 		}
 	}
@@ -2165,10 +2233,10 @@
 
 	/* Lookup the exact service by <protocol, addr, port> or fwmark */
 	if (usvc.fwmark == 0)
-		svc = __ip_vs_service_find(usvc.af, usvc.protocol,
+		svc = __ip_vs_service_find(net, usvc.af, usvc.protocol,
 					   &usvc.addr, usvc.port);
 	else
-		svc = __ip_vs_svc_fwm_find(usvc.af, usvc.fwmark);
+		svc = __ip_vs_svc_fwm_find(net, usvc.af, usvc.fwmark);
 
 	if (cmd != IP_VS_SO_SET_ADD
 	    && (svc == NULL || svc->protocol != usvc.protocol)) {
@@ -2181,7 +2249,7 @@
 		if (svc != NULL)
 			ret = -EEXIST;
 		else
-			ret = ip_vs_add_service(&usvc, &svc);
+			ret = ip_vs_add_service(net, &usvc, &svc);
 		break;
 	case IP_VS_SO_SET_EDIT:
 		ret = ip_vs_edit_service(svc, &usvc);
@@ -2241,7 +2309,8 @@
 }
 
 static inline int
-__ip_vs_get_service_entries(const struct ip_vs_get_services *get,
+__ip_vs_get_service_entries(struct net *net,
+			    const struct ip_vs_get_services *get,
 			    struct ip_vs_get_services __user *uptr)
 {
 	int idx, count=0;
@@ -2252,7 +2321,7 @@
 	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
 			/* Only expose IPv4 entries to old interface */
-			if (svc->af != AF_INET)
+			if (svc->af != AF_INET || !net_eq(svc->net, net))
 				continue;
 
 			if (count >= get->num_services)
@@ -2271,7 +2340,7 @@
 	for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
 		list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
 			/* Only expose IPv4 entries to old interface */
-			if (svc->af != AF_INET)
+			if (svc->af != AF_INET || !net_eq(svc->net, net))
 				continue;
 
 			if (count >= get->num_services)
@@ -2291,7 +2360,7 @@
 }
 
 static inline int
-__ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
+__ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
 			 struct ip_vs_get_dests __user *uptr)
 {
 	struct ip_vs_service *svc;
@@ -2299,9 +2368,9 @@
 	int ret = 0;
 
 	if (get->fwmark)
-		svc = __ip_vs_svc_fwm_find(AF_INET, get->fwmark);
+		svc = __ip_vs_svc_fwm_find(net, AF_INET, get->fwmark);
 	else
-		svc = __ip_vs_service_find(AF_INET, get->protocol, &addr,
+		svc = __ip_vs_service_find(net, AF_INET, get->protocol, &addr,
 					   get->port);
 
 	if (svc) {
@@ -2336,17 +2405,21 @@
 }
 
 static inline void
-__ip_vs_get_timeouts(struct ip_vs_timeout_user *u)
+__ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u)
 {
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP)
+	struct ip_vs_proto_data *pd;
+#endif
+
 #ifdef CONFIG_IP_VS_PROTO_TCP
-	u->tcp_timeout =
-		ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
-	u->tcp_fin_timeout =
-		ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
+	pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+	u->tcp_timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] / HZ;
+	u->tcp_fin_timeout = pd->timeout_table[IP_VS_TCP_S_FIN_WAIT] / HZ;
 #endif
 #ifdef CONFIG_IP_VS_PROTO_UDP
+	pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
 	u->udp_timeout =
-		ip_vs_protocol_udp.timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
+			pd->timeout_table[IP_VS_UDP_S_NORMAL] / HZ;
 #endif
 }
 
@@ -2375,7 +2448,10 @@
 	unsigned char arg[128];
 	int ret = 0;
 	unsigned int copylen;
+	struct net *net = sock_net(sk);
+	struct netns_ipvs *ipvs = net_ipvs(net);
 
+	BUG_ON(!net);
 	if (!capable(CAP_NET_ADMIN))
 		return -EPERM;
 
@@ -2418,7 +2494,7 @@
 		struct ip_vs_getinfo info;
 		info.version = IP_VS_VERSION_CODE;
 		info.size = ip_vs_conn_tab_size;
-		info.num_services = ip_vs_num_services;
+		info.num_services = ipvs->num_services;
 		if (copy_to_user(user, &info, sizeof(info)) != 0)
 			ret = -EFAULT;
 	}
@@ -2437,7 +2513,7 @@
 			ret = -EINVAL;
 			goto out;
 		}
-		ret = __ip_vs_get_service_entries(get, user);
+		ret = __ip_vs_get_service_entries(net, get, user);
 	}
 	break;
 
@@ -2450,10 +2526,11 @@
 		entry = (struct ip_vs_service_entry *)arg;
 		addr.ip = entry->addr;
 		if (entry->fwmark)
-			svc = __ip_vs_svc_fwm_find(AF_INET, entry->fwmark);
+			svc = __ip_vs_svc_fwm_find(net, AF_INET, entry->fwmark);
 		else
-			svc = __ip_vs_service_find(AF_INET, entry->protocol,
-						   &addr, entry->port);
+			svc = __ip_vs_service_find(net, AF_INET,
+						   entry->protocol, &addr,
+						   entry->port);
 		if (svc) {
 			ip_vs_copy_service(entry, svc);
 			if (copy_to_user(user, entry, sizeof(*entry)) != 0)
@@ -2476,7 +2553,7 @@
 			ret = -EINVAL;
 			goto out;
 		}
-		ret = __ip_vs_get_dest_entries(get, user);
+		ret = __ip_vs_get_dest_entries(net, get, user);
 	}
 	break;
 
@@ -2484,7 +2561,7 @@
 	{
 		struct ip_vs_timeout_user t;
 
-		__ip_vs_get_timeouts(&t);
+		__ip_vs_get_timeouts(net, &t);
 		if (copy_to_user(user, &t, sizeof(t)) != 0)
 			ret = -EFAULT;
 	}
@@ -2495,15 +2572,17 @@
 		struct ip_vs_daemon_user d[2];
 
 		memset(&d, 0, sizeof(d));
-		if (ip_vs_sync_state & IP_VS_STATE_MASTER) {
+		if (ipvs->sync_state & IP_VS_STATE_MASTER) {
 			d[0].state = IP_VS_STATE_MASTER;
-			strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn));
-			d[0].syncid = ip_vs_master_syncid;
+			strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn,
+				sizeof(d[0].mcast_ifn));
+			d[0].syncid = ipvs->master_syncid;
 		}
-		if (ip_vs_sync_state & IP_VS_STATE_BACKUP) {
+		if (ipvs->sync_state & IP_VS_STATE_BACKUP) {
 			d[1].state = IP_VS_STATE_BACKUP;
-			strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn));
-			d[1].syncid = ip_vs_backup_syncid;
+			strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn,
+				sizeof(d[1].mcast_ifn));
+			d[1].syncid = ipvs->backup_syncid;
 		}
 		if (copy_to_user(user, &d, sizeof(d)) != 0)
 			ret = -EFAULT;
@@ -2542,6 +2621,7 @@
 	.name		= IPVS_GENL_NAME,
 	.version	= IPVS_GENL_VERSION,
 	.maxattr	= IPVS_CMD_MAX,
+	.netnsok        = true,         /* Make ipvsadm work on netns */
 };
 
 /* Policy used for first-level command attributes */
@@ -2696,11 +2776,12 @@
 	int idx = 0, i;
 	int start = cb->args[0];
 	struct ip_vs_service *svc;
+	struct net *net = skb_sknet(skb);
 
 	mutex_lock(&__ip_vs_mutex);
 	for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
 		list_for_each_entry(svc, &ip_vs_svc_table[i], s_list) {
-			if (++idx <= start)
+			if (++idx <= start || !net_eq(svc->net, net))
 				continue;
 			if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
 				idx--;
@@ -2711,7 +2792,7 @@
 
 	for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
 		list_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) {
-			if (++idx <= start)
+			if (++idx <= start || !net_eq(svc->net, net))
 				continue;
 			if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
 				idx--;
@@ -2727,7 +2808,8 @@
 	return skb->len;
 }
 
-static int ip_vs_genl_parse_service(struct ip_vs_service_user_kern *usvc,
+static int ip_vs_genl_parse_service(struct net *net,
+				    struct ip_vs_service_user_kern *usvc,
 				    struct nlattr *nla, int full_entry,
 				    struct ip_vs_service **ret_svc)
 {
@@ -2770,9 +2852,9 @@
 	}
 
 	if (usvc->fwmark)
-		svc = __ip_vs_svc_fwm_find(usvc->af, usvc->fwmark);
+		svc = __ip_vs_svc_fwm_find(net, usvc->af, usvc->fwmark);
 	else
-		svc = __ip_vs_service_find(usvc->af, usvc->protocol,
+		svc = __ip_vs_service_find(net, usvc->af, usvc->protocol,
 					   &usvc->addr, usvc->port);
 	*ret_svc = svc;
 
@@ -2809,13 +2891,14 @@
 	return 0;
 }
 
-static struct ip_vs_service *ip_vs_genl_find_service(struct nlattr *nla)
+static struct ip_vs_service *ip_vs_genl_find_service(struct net *net,
+						     struct nlattr *nla)
 {
 	struct ip_vs_service_user_kern usvc;
 	struct ip_vs_service *svc;
 	int ret;
 
-	ret = ip_vs_genl_parse_service(&usvc, nla, 0, &svc);
+	ret = ip_vs_genl_parse_service(net, &usvc, nla, 0, &svc);
 	return ret ? ERR_PTR(ret) : svc;
 }
 
@@ -2883,6 +2966,7 @@
 	struct ip_vs_service *svc;
 	struct ip_vs_dest *dest;
 	struct nlattr *attrs[IPVS_CMD_ATTR_MAX + 1];
+	struct net *net = skb_sknet(skb);
 
 	mutex_lock(&__ip_vs_mutex);
 
@@ -2891,7 +2975,8 @@
 			IPVS_CMD_ATTR_MAX, ip_vs_cmd_policy))
 		goto out_err;
 
-	svc = ip_vs_genl_find_service(attrs[IPVS_CMD_ATTR_SERVICE]);
+
+	svc = ip_vs_genl_find_service(net, attrs[IPVS_CMD_ATTR_SERVICE]);
 	if (IS_ERR(svc) || svc == NULL)
 		goto out_err;
 
@@ -3005,20 +3090,23 @@
 static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
 				   struct netlink_callback *cb)
 {
+	struct net *net = skb_net(skb);
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
 	mutex_lock(&__ip_vs_mutex);
-	if ((ip_vs_sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
+	if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) {
 		if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER,
-					   ip_vs_master_mcast_ifn,
-					   ip_vs_master_syncid, cb) < 0)
+					   ipvs->master_mcast_ifn,
+					   ipvs->master_syncid, cb) < 0)
 			goto nla_put_failure;
 
 		cb->args[0] = 1;
 	}
 
-	if ((ip_vs_sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
+	if ((ipvs->sync_state & IP_VS_STATE_BACKUP) && !cb->args[1]) {
 		if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_BACKUP,
-					   ip_vs_backup_mcast_ifn,
-					   ip_vs_backup_syncid, cb) < 0)
+					   ipvs->backup_mcast_ifn,
+					   ipvs->backup_syncid, cb) < 0)
 			goto nla_put_failure;
 
 		cb->args[1] = 1;
@@ -3030,31 +3118,33 @@
 	return skb->len;
 }
 
-static int ip_vs_genl_new_daemon(struct nlattr **attrs)
+static int ip_vs_genl_new_daemon(struct net *net, struct nlattr **attrs)
 {
 	if (!(attrs[IPVS_DAEMON_ATTR_STATE] &&
 	      attrs[IPVS_DAEMON_ATTR_MCAST_IFN] &&
 	      attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
 		return -EINVAL;
 
-	return start_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
+	return start_sync_thread(net,
+				 nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
 				 nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
 				 nla_get_u32(attrs[IPVS_DAEMON_ATTR_SYNC_ID]));
 }
 
-static int ip_vs_genl_del_daemon(struct nlattr **attrs)
+static int ip_vs_genl_del_daemon(struct net *net, struct nlattr **attrs)
 {
 	if (!attrs[IPVS_DAEMON_ATTR_STATE])
 		return -EINVAL;
 
-	return stop_sync_thread(nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
+	return stop_sync_thread(net,
+				nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
 }
 
-static int ip_vs_genl_set_config(struct nlattr **attrs)
+static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs)
 {
 	struct ip_vs_timeout_user t;
 
-	__ip_vs_get_timeouts(&t);
+	__ip_vs_get_timeouts(net, &t);
 
 	if (attrs[IPVS_CMD_ATTR_TIMEOUT_TCP])
 		t.tcp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_TCP]);
@@ -3066,7 +3156,7 @@
 	if (attrs[IPVS_CMD_ATTR_TIMEOUT_UDP])
 		t.udp_timeout = nla_get_u32(attrs[IPVS_CMD_ATTR_TIMEOUT_UDP]);
 
-	return ip_vs_set_timeout(&t);
+	return ip_vs_set_timeout(net, &t);
 }
 
 static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
@@ -3076,16 +3166,20 @@
 	struct ip_vs_dest_user_kern udest;
 	int ret = 0, cmd;
 	int need_full_svc = 0, need_full_dest = 0;
+	struct net *net;
+	struct netns_ipvs *ipvs;
 
+	net = skb_sknet(skb);
+	ipvs = net_ipvs(net);
 	cmd = info->genlhdr->cmd;
 
 	mutex_lock(&__ip_vs_mutex);
 
 	if (cmd == IPVS_CMD_FLUSH) {
-		ret = ip_vs_flush();
+		ret = ip_vs_flush(net);
 		goto out;
 	} else if (cmd == IPVS_CMD_SET_CONFIG) {
-		ret = ip_vs_genl_set_config(info->attrs);
+		ret = ip_vs_genl_set_config(net, info->attrs);
 		goto out;
 	} else if (cmd == IPVS_CMD_NEW_DAEMON ||
 		   cmd == IPVS_CMD_DEL_DAEMON) {
@@ -3101,13 +3195,13 @@
 		}
 
 		if (cmd == IPVS_CMD_NEW_DAEMON)
-			ret = ip_vs_genl_new_daemon(daemon_attrs);
+			ret = ip_vs_genl_new_daemon(net, daemon_attrs);
 		else
-			ret = ip_vs_genl_del_daemon(daemon_attrs);
+			ret = ip_vs_genl_del_daemon(net, daemon_attrs);
 		goto out;
 	} else if (cmd == IPVS_CMD_ZERO &&
 		   !info->attrs[IPVS_CMD_ATTR_SERVICE]) {
-		ret = ip_vs_zero_all();
+		ret = ip_vs_zero_all(net);
 		goto out;
 	}
 
@@ -3117,7 +3211,7 @@
 	if (cmd == IPVS_CMD_NEW_SERVICE || cmd == IPVS_CMD_SET_SERVICE)
 		need_full_svc = 1;
 
-	ret = ip_vs_genl_parse_service(&usvc,
+	ret = ip_vs_genl_parse_service(net, &usvc,
 				       info->attrs[IPVS_CMD_ATTR_SERVICE],
 				       need_full_svc, &svc);
 	if (ret)
@@ -3147,7 +3241,7 @@
 	switch (cmd) {
 	case IPVS_CMD_NEW_SERVICE:
 		if (svc == NULL)
-			ret = ip_vs_add_service(&usvc, &svc);
+			ret = ip_vs_add_service(net, &usvc, &svc);
 		else
 			ret = -EEXIST;
 		break;
@@ -3185,7 +3279,11 @@
 	struct sk_buff *msg;
 	void *reply;
 	int ret, cmd, reply_cmd;
+	struct net *net;
+	struct netns_ipvs *ipvs;
 
+	net = skb_sknet(skb);
+	ipvs = net_ipvs(net);
 	cmd = info->genlhdr->cmd;
 
 	if (cmd == IPVS_CMD_GET_SERVICE)
@@ -3214,7 +3312,8 @@
 	{
 		struct ip_vs_service *svc;
 
-		svc = ip_vs_genl_find_service(info->attrs[IPVS_CMD_ATTR_SERVICE]);
+		svc = ip_vs_genl_find_service(net,
+					      info->attrs[IPVS_CMD_ATTR_SERVICE]);
 		if (IS_ERR(svc)) {
 			ret = PTR_ERR(svc);
 			goto out_err;
@@ -3234,7 +3333,7 @@
 	{
 		struct ip_vs_timeout_user t;
 
-		__ip_vs_get_timeouts(&t);
+		__ip_vs_get_timeouts(net, &t);
 #ifdef CONFIG_IP_VS_PROTO_TCP
 		NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout);
 		NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
@@ -3380,62 +3479,173 @@
 
 /* End of Generic Netlink interface definitions */
 
+/*
+ * per netns init/exit func.
+ */
+int __net_init __ip_vs_control_init(struct net *net)
+{
+	int idx;
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct ctl_table *tbl;
+
+	atomic_set(&ipvs->dropentry, 0);
+	spin_lock_init(&ipvs->dropentry_lock);
+	spin_lock_init(&ipvs->droppacket_lock);
+	spin_lock_init(&ipvs->securetcp_lock);
+	ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);
+
+	/* Initialize rs_table */
+	for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
+		INIT_LIST_HEAD(&ipvs->rs_table[idx]);
+
+	INIT_LIST_HEAD(&ipvs->dest_trash);
+	atomic_set(&ipvs->ftpsvc_counter, 0);
+	atomic_set(&ipvs->nullsvc_counter, 0);
+
+	/* procfs stats */
+	ipvs->tot_stats = kzalloc(sizeof(struct ip_vs_stats), GFP_KERNEL);
+	if (ipvs->tot_stats == NULL) {
+		pr_err("%s(): no memory.\n", __func__);
+		return -ENOMEM;
+	}
+	ipvs->cpustats = alloc_percpu(struct ip_vs_cpu_stats);
+	if (!ipvs->cpustats) {
+		pr_err("%s() alloc_percpu failed\n", __func__);
+		goto err_alloc;
+	}
+	spin_lock_init(&ipvs->tot_stats->lock);
+
+	proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops);
+	proc_net_fops_create(net, "ip_vs_stats", 0, &ip_vs_stats_fops);
+	proc_net_fops_create(net, "ip_vs_stats_percpu", 0,
+			     &ip_vs_stats_percpu_fops);
+
+	if (!net_eq(net, &init_net)) {
+		tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);
+		if (tbl == NULL)
+			goto err_dup;
+	} else
+		tbl = vs_vars;
+	/* Initialize sysctl defaults */
+	idx = 0;
+	ipvs->sysctl_amemthresh = 1024;
+	tbl[idx++].data = &ipvs->sysctl_amemthresh;
+	ipvs->sysctl_am_droprate = 10;
+	tbl[idx++].data = &ipvs->sysctl_am_droprate;
+	tbl[idx++].data = &ipvs->sysctl_drop_entry;
+	tbl[idx++].data = &ipvs->sysctl_drop_packet;
+#ifdef CONFIG_IP_VS_NFCT
+	tbl[idx++].data = &ipvs->sysctl_conntrack;
+#endif
+	tbl[idx++].data = &ipvs->sysctl_secure_tcp;
+	ipvs->sysctl_snat_reroute = 1;
+	tbl[idx++].data = &ipvs->sysctl_snat_reroute;
+	ipvs->sysctl_sync_ver = 1;
+	tbl[idx++].data = &ipvs->sysctl_sync_ver;
+	tbl[idx++].data = &ipvs->sysctl_cache_bypass;
+	tbl[idx++].data = &ipvs->sysctl_expire_nodest_conn;
+	tbl[idx++].data = &ipvs->sysctl_expire_quiescent_template;
+	ipvs->sysctl_sync_threshold[0] = 3;
+	ipvs->sysctl_sync_threshold[1] = 50;
+	tbl[idx].data = &ipvs->sysctl_sync_threshold;
+	tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
+	tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
+
+
+#ifdef CONFIG_SYSCTL
+	ipvs->sysctl_hdr = register_net_sysctl_table(net, net_vs_ctl_path,
+						     tbl);
+	if (ipvs->sysctl_hdr == NULL) {
+		if (!net_eq(net, &init_net))
+			kfree(tbl);
+		goto err_dup;
+	}
+#endif
+	ip_vs_new_estimator(net, ipvs->tot_stats);
+	ipvs->sysctl_tbl = tbl;
+	/* Schedule defense work */
+	INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
+	schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
+	return 0;
+
+err_dup:
+	free_percpu(ipvs->cpustats);
+err_alloc:
+	kfree(ipvs->tot_stats);
+	return -ENOMEM;
+}
+
+static void __net_exit __ip_vs_control_cleanup(struct net *net)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	ip_vs_trash_cleanup(net);
+	ip_vs_kill_estimator(net, ipvs->tot_stats);
+	cancel_delayed_work_sync(&ipvs->defense_work);
+	cancel_work_sync(&ipvs->defense_work.work);
+#ifdef CONFIG_SYSCTL
+	unregister_net_sysctl_table(ipvs->sysctl_hdr);
+#endif
+	proc_net_remove(net, "ip_vs_stats_percpu");
+	proc_net_remove(net, "ip_vs_stats");
+	proc_net_remove(net, "ip_vs");
+	free_percpu(ipvs->cpustats);
+	kfree(ipvs->tot_stats);
+}
+
+static struct pernet_operations ipvs_control_ops = {
+	.init = __ip_vs_control_init,
+	.exit = __ip_vs_control_cleanup,
+};
 
 int __init ip_vs_control_init(void)
 {
-	int ret;
 	int idx;
+	int ret;
 
 	EnterFunction(2);
 
-	/* Initialize ip_vs_svc_table, ip_vs_svc_fwm_table, ip_vs_rtable */
+	/* Initialize ip_vs_svc_table and ip_vs_svc_fwm_table */
 	for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++)  {
 		INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
 		INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
 	}
-	for(idx = 0; idx < IP_VS_RTAB_SIZE; idx++)  {
-		INIT_LIST_HEAD(&ip_vs_rtable[idx]);
+
+	ret = register_pernet_subsys(&ipvs_control_ops);
+	if (ret) {
+		pr_err("cannot register namespace.\n");
+		goto err;
 	}
-	smp_wmb();
+
+	smp_wmb();	/* Do we really need it now? */
 
 	ret = nf_register_sockopt(&ip_vs_sockopts);
 	if (ret) {
 		pr_err("cannot register sockopt.\n");
-		return ret;
+		goto err_net;
 	}
 
 	ret = ip_vs_genl_register();
 	if (ret) {
 		pr_err("cannot register Generic Netlink interface.\n");
 		nf_unregister_sockopt(&ip_vs_sockopts);
-		return ret;
+		goto err_net;
 	}
 
-	proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops);
-	proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops);
-
-	sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars);
-
-	ip_vs_new_estimator(&ip_vs_stats);
-
-	/* Hook the defense timer */
-	schedule_delayed_work(&defense_work, DEFENSE_TIMER_PERIOD);
-
 	LeaveFunction(2);
 	return 0;
+
+err_net:
+	unregister_pernet_subsys(&ipvs_control_ops);
+err:
+	return ret;
 }
 
 
 void ip_vs_control_cleanup(void)
 {
 	EnterFunction(2);
-	ip_vs_trash_cleanup();
-	cancel_delayed_work_sync(&defense_work);
-	cancel_work_sync(&defense_work.work);
-	ip_vs_kill_estimator(&ip_vs_stats);
-	unregister_sysctl_table(sysctl_header);
-	proc_net_remove(&init_net, "ip_vs_stats");
-	proc_net_remove(&init_net, "ip_vs");
+	unregister_pernet_subsys(&ipvs_control_ops);
 	ip_vs_genl_unregister();
 	nf_unregister_sockopt(&ip_vs_sockopts);
 	LeaveFunction(2);
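
The same conversion recurs throughout the files below: file-scope globals move
into struct netns_ipvs, and each module registers a struct pernet_operations
whose init/exit hooks run once per network namespace. A minimal sketch of that
shape (the foo_* names are illustrative only, not part of this patch):

	static int __net_init foo_net_init(struct net *net)
	{
		struct netns_ipvs *ipvs = net_ipvs(net);

		/* set up state that used to live in file-scope globals */
		spin_lock_init(&ipvs->est_lock);	/* example field */
		return 0;
	}

	static void __net_exit foo_net_exit(struct net *net)
	{
		/* undo foo_net_init() for this namespace */
	}

	static struct pernet_operations foo_net_ops = {
		.init = foo_net_init,
		.exit = foo_net_exit,
	};

	/* module init calls register_pernet_subsys(&foo_net_ops),
	 * module exit calls unregister_pernet_subsys(&foo_net_ops). */
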
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index ff28801..f560a05 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -8,8 +8,12 @@
  *              as published by the Free Software Foundation; either version
  *              2 of the License, or (at your option) any later version.
  *
- * Changes:
- *
+ * Changes:     Hans Schillstrom <hans.schillstrom@ericsson.com>
+ *              Network name space (netns) aware.
+ *              Global data moved to netns i.e. struct netns_ipvs
+ *              Affected data: est_list and est_lock.
+ *              estimation_timer() runs with timer per netns.
+ *              get_stats() does the per cpu summing.
  */
 
 #define KMSG_COMPONENT "IPVS"
@@ -48,11 +52,42 @@
  */
 
 
-static void estimation_timer(unsigned long arg);
+/*
+ * Make a summary from each cpu
+ */
+static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
+				 struct ip_vs_cpu_stats *stats)
+{
+	int i;
 
-static LIST_HEAD(est_list);
-static DEFINE_SPINLOCK(est_lock);
-static DEFINE_TIMER(est_timer, estimation_timer, 0, 0);
+	for_each_possible_cpu(i) {
+		struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i);
+		unsigned int start;
+		__u64 inbytes, outbytes;
+		if (i) {
+			sum->conns += s->ustats.conns;
+			sum->inpkts += s->ustats.inpkts;
+			sum->outpkts += s->ustats.outpkts;
+			do {
+				start = u64_stats_fetch_begin_bh(&s->syncp);
+				inbytes = s->ustats.inbytes;
+				outbytes = s->ustats.outbytes;
+			} while (u64_stats_fetch_retry_bh(&s->syncp, start));
+			sum->inbytes += inbytes;
+			sum->outbytes += outbytes;
+		} else {
+			sum->conns = s->ustats.conns;
+			sum->inpkts = s->ustats.inpkts;
+			sum->outpkts = s->ustats.outpkts;
+			do {
+				start = u64_stats_fetch_begin_bh(&s->syncp);
+				sum->inbytes = s->ustats.inbytes;
+				sum->outbytes = s->ustats.outbytes;
+			} while (u64_stats_fetch_retry_bh(&s->syncp, start));
+		}
+	}
+}
+
 
 static void estimation_timer(unsigned long arg)
 {
@@ -62,11 +97,16 @@
 	u32 n_inpkts, n_outpkts;
 	u64 n_inbytes, n_outbytes;
 	u32 rate;
+	struct net *net = (struct net *)arg;
+	struct netns_ipvs *ipvs;
 
-	spin_lock(&est_lock);
-	list_for_each_entry(e, &est_list, list) {
+	ipvs = net_ipvs(net);
+	ip_vs_read_cpu_stats(&ipvs->tot_stats->ustats, ipvs->cpustats);
+	spin_lock(&ipvs->est_lock);
+	list_for_each_entry(e, &ipvs->est_list, list) {
 		s = container_of(e, struct ip_vs_stats, est);
 
+		ip_vs_read_cpu_stats(&s->ustats, s->cpustats);
 		spin_lock(&s->lock);
 		n_conns = s->ustats.conns;
 		n_inpkts = s->ustats.inpkts;
@@ -75,38 +115,39 @@
 		n_outbytes = s->ustats.outbytes;
 
 		/* scaled by 2^10, but divided 2 seconds */
-		rate = (n_conns - e->last_conns)<<9;
+		rate = (n_conns - e->last_conns) << 9;
 		e->last_conns = n_conns;
-		e->cps += ((long)rate - (long)e->cps)>>2;
-		s->ustats.cps = (e->cps+0x1FF)>>10;
+		e->cps += ((long)rate - (long)e->cps) >> 2;
+		s->ustats.cps = (e->cps + 0x1FF) >> 10;
 
-		rate = (n_inpkts - e->last_inpkts)<<9;
+		rate = (n_inpkts - e->last_inpkts) << 9;
 		e->last_inpkts = n_inpkts;
-		e->inpps += ((long)rate - (long)e->inpps)>>2;
-		s->ustats.inpps = (e->inpps+0x1FF)>>10;
+		e->inpps += ((long)rate - (long)e->inpps) >> 2;
+		s->ustats.inpps = (e->inpps + 0x1FF) >> 10;
 
-		rate = (n_outpkts - e->last_outpkts)<<9;
+		rate = (n_outpkts - e->last_outpkts) << 9;
 		e->last_outpkts = n_outpkts;
-		e->outpps += ((long)rate - (long)e->outpps)>>2;
-		s->ustats.outpps = (e->outpps+0x1FF)>>10;
+		e->outpps += ((long)rate - (long)e->outpps) >> 2;
+		s->ustats.outpps = (e->outpps + 0x1FF) >> 10;
 
-		rate = (n_inbytes - e->last_inbytes)<<4;
+		rate = (n_inbytes - e->last_inbytes) << 4;
 		e->last_inbytes = n_inbytes;
-		e->inbps += ((long)rate - (long)e->inbps)>>2;
-		s->ustats.inbps = (e->inbps+0xF)>>5;
+		e->inbps += ((long)rate - (long)e->inbps) >> 2;
+		s->ustats.inbps = (e->inbps + 0xF) >> 5;
 
-		rate = (n_outbytes - e->last_outbytes)<<4;
+		rate = (n_outbytes - e->last_outbytes) << 4;
 		e->last_outbytes = n_outbytes;
-		e->outbps += ((long)rate - (long)e->outbps)>>2;
-		s->ustats.outbps = (e->outbps+0xF)>>5;
+		e->outbps += ((long)rate - (long)e->outbps) >> 2;
+		s->ustats.outbps = (e->outbps + 0xF) >> 5;
 		spin_unlock(&s->lock);
 	}
-	spin_unlock(&est_lock);
-	mod_timer(&est_timer, jiffies + 2*HZ);
+	spin_unlock(&ipvs->est_lock);
+	mod_timer(&ipvs->est_timer, jiffies + 2*HZ);
 }
 
-void ip_vs_new_estimator(struct ip_vs_stats *stats)
+void ip_vs_new_estimator(struct net *net, struct ip_vs_stats *stats)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_estimator *est = &stats->est;
 
 	INIT_LIST_HEAD(&est->list);
@@ -126,18 +167,19 @@
 	est->last_outbytes = stats->ustats.outbytes;
 	est->outbps = stats->ustats.outbps<<5;
 
-	spin_lock_bh(&est_lock);
-	list_add(&est->list, &est_list);
-	spin_unlock_bh(&est_lock);
+	spin_lock_bh(&ipvs->est_lock);
+	list_add(&est->list, &ipvs->est_list);
+	spin_unlock_bh(&ipvs->est_lock);
 }
 
-void ip_vs_kill_estimator(struct ip_vs_stats *stats)
+void ip_vs_kill_estimator(struct net *net, struct ip_vs_stats *stats)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct ip_vs_estimator *est = &stats->est;
 
-	spin_lock_bh(&est_lock);
+	spin_lock_bh(&ipvs->est_lock);
 	list_del(&est->list);
-	spin_unlock_bh(&est_lock);
+	spin_unlock_bh(&ipvs->est_lock);
 }
 
 void ip_vs_zero_estimator(struct ip_vs_stats *stats)
@@ -157,13 +199,35 @@
 	est->outbps = 0;
 }
 
+static int __net_init __ip_vs_estimator_init(struct net *net)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	INIT_LIST_HEAD(&ipvs->est_list);
+	spin_lock_init(&ipvs->est_lock);
+	setup_timer(&ipvs->est_timer, estimation_timer, (unsigned long)net);
+	mod_timer(&ipvs->est_timer, jiffies + 2 * HZ);
+	return 0;
+}
+
+static void __net_exit __ip_vs_estimator_exit(struct net *net)
+{
+	del_timer_sync(&net_ipvs(net)->est_timer);
+}
+static struct pernet_operations ip_vs_app_ops = {
+	.init = __ip_vs_estimator_init,
+	.exit = __ip_vs_estimator_exit,
+};
+
 int __init ip_vs_estimator_init(void)
 {
-	mod_timer(&est_timer, jiffies + 2 * HZ);
-	return 0;
+	int rv;
+
+	rv = register_pernet_subsys(&ip_vs_app_ops);
+	return rv;
 }
 
 void ip_vs_estimator_cleanup(void)
 {
-	del_timer_sync(&est_timer);
+	unregister_pernet_subsys(&ip_vs_app_ops);
 }
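
The ip_vs_read_cpu_stats() reader above brackets its 64-bit byte counters with
u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() so they read consistently
on 32-bit SMP; that only works if the per-cpu update side uses the matching
seqcount. Roughly what the hot-path writer looks like under this scheme (the
real increments live in ip_vs_core.c, outside this excerpt, and may differ):

	/* Hypothetical writer, assumed to run in softirq context as in the
	 * IPVS RX path; cpustats is the __percpu pointer (e.g. ipvs->cpustats). */
	struct ip_vs_cpu_stats *s = this_cpu_ptr(cpustats);

	s->ustats.inpkts++;
	u64_stats_update_begin(&s->syncp);
	s->ustats.inbytes += skb->len;
	u64_stats_update_end(&s->syncp);
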
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 7545500..6b5dd6d 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -157,6 +157,7 @@
 	int ret = 0;
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct;
+	struct net *net;
 
 #ifdef CONFIG_IP_VS_IPV6
 	/* This application helper doesn't work with IPv6 yet,
@@ -197,18 +198,20 @@
 		 */
 		{
 			struct ip_vs_conn_param p;
-			ip_vs_conn_fill_param(AF_INET, iph->protocol,
-					      &from, port, &cp->caddr, 0, &p);
+			ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
+					      iph->protocol, &from, port,
+					      &cp->caddr, 0, &p);
 			n_cp = ip_vs_conn_out_get(&p);
 		}
 		if (!n_cp) {
 			struct ip_vs_conn_param p;
-			ip_vs_conn_fill_param(AF_INET, IPPROTO_TCP, &cp->caddr,
+			ip_vs_conn_fill_param(ip_vs_conn_net(cp),
+					      AF_INET, IPPROTO_TCP, &cp->caddr,
 					      0, &cp->vaddr, port, &p);
 			n_cp = ip_vs_conn_new(&p, &from, port,
 					      IP_VS_CONN_F_NO_CPORT |
 					      IP_VS_CONN_F_NFCT,
-					      cp->dest);
+					      cp->dest, skb->mark);
 			if (!n_cp)
 				return 0;
 
@@ -257,8 +260,9 @@
 		 * would be adjusted twice.
 		 */
 
+		net = skb_net(skb);
 		cp->app_data = NULL;
-		ip_vs_tcp_conn_listen(n_cp);
+		ip_vs_tcp_conn_listen(net, n_cp);
 		ip_vs_conn_put(n_cp);
 		return ret;
 	}
@@ -287,6 +291,7 @@
 	union nf_inet_addr to;
 	__be16 port;
 	struct ip_vs_conn *n_cp;
+	struct net *net;
 
 #ifdef CONFIG_IP_VS_IPV6
 	/* This application helper doesn't work with IPv6 yet,
@@ -358,14 +363,15 @@
 
 	{
 		struct ip_vs_conn_param p;
-		ip_vs_conn_fill_param(AF_INET, iph->protocol, &to, port,
-				      &cp->vaddr, htons(ntohs(cp->vport)-1),
-				      &p);
+		ip_vs_conn_fill_param(ip_vs_conn_net(cp), AF_INET,
+				      iph->protocol, &to, port, &cp->vaddr,
+				      htons(ntohs(cp->vport)-1), &p);
 		n_cp = ip_vs_conn_in_get(&p);
 		if (!n_cp) {
 			n_cp = ip_vs_conn_new(&p, &cp->daddr,
 					      htons(ntohs(cp->dport)-1),
-					      IP_VS_CONN_F_NFCT, cp->dest);
+					      IP_VS_CONN_F_NFCT, cp->dest,
+					      skb->mark);
 			if (!n_cp)
 				return 0;
 
@@ -377,7 +383,8 @@
 	/*
 	 *	Move tunnel to listen state
 	 */
-	ip_vs_tcp_conn_listen(n_cp);
+	net = skb_net(skb);
+	ip_vs_tcp_conn_listen(net, n_cp);
 	ip_vs_conn_put(n_cp);
 
 	return 1;
@@ -398,23 +405,22 @@
 	.pkt_in =	ip_vs_ftp_in,
 };
 
-
 /*
- *	ip_vs_ftp initialization
+ *	per netns ip_vs_ftp initialization
  */
-static int __init ip_vs_ftp_init(void)
+static int __net_init __ip_vs_ftp_init(struct net *net)
 {
 	int i, ret;
 	struct ip_vs_app *app = &ip_vs_ftp;
 
-	ret = register_ip_vs_app(app);
+	ret = register_ip_vs_app(net, app);
 	if (ret)
 		return ret;
 
 	for (i=0; i<IP_VS_APP_MAX_PORTS; i++) {
 		if (!ports[i])
 			continue;
-		ret = register_ip_vs_app_inc(app, app->protocol, ports[i]);
+		ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]);
 		if (ret)
 			break;
 		pr_info("%s: loaded support on port[%d] = %d\n",
@@ -422,18 +428,39 @@
 	}
 
 	if (ret)
-		unregister_ip_vs_app(app);
+		unregister_ip_vs_app(net, app);
 
 	return ret;
 }
+/*
+ *	netns exit
+ */
+static void __ip_vs_ftp_exit(struct net *net)
+{
+	struct ip_vs_app *app = &ip_vs_ftp;
 
+	unregister_ip_vs_app(net, app);
+}
+
+static struct pernet_operations ip_vs_ftp_ops = {
+	.init = __ip_vs_ftp_init,
+	.exit = __ip_vs_ftp_exit,
+};
+
+int __init ip_vs_ftp_init(void)
+{
+	int rv;
+
+	rv = register_pernet_subsys(&ip_vs_ftp_ops);
+	return rv;
+}
 
 /*
  *	ip_vs_ftp finish.
  */
 static void __exit ip_vs_ftp_exit(void)
 {
-	unregister_ip_vs_app(&ip_vs_ftp);
+	unregister_pernet_subsys(&ip_vs_ftp_ops);
 }
 
 
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 9323f89..00b5ffa 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -70,7 +70,6 @@
  *    entries that haven't been touched for a day.
  */
 #define COUNT_FOR_FULL_EXPIRATION   30
-static int sysctl_ip_vs_lblc_expiration = 24*60*60*HZ;
 
 
 /*
@@ -117,7 +116,7 @@
 static ctl_table vs_vars_table[] = {
 	{
 		.procname	= "lblc_expiration",
-		.data		= &sysctl_ip_vs_lblc_expiration,
+		.data		= NULL,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
@@ -125,8 +124,6 @@
 	{ }
 };
 
-static struct ctl_table_header * sysctl_header;
-
 static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
 {
 	list_del(&en->list);
@@ -248,6 +245,7 @@
 	struct ip_vs_lblc_entry *en, *nxt;
 	unsigned long now = jiffies;
 	int i, j;
+	struct netns_ipvs *ipvs = net_ipvs(svc->net);
 
 	for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
 		j = (j + 1) & IP_VS_LBLC_TAB_MASK;
@@ -255,7 +253,8 @@
 		write_lock(&svc->sched_lock);
 		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
 			if (time_before(now,
-					en->lastuse + sysctl_ip_vs_lblc_expiration))
+					en->lastuse +
+					ipvs->sysctl_lblc_expiration))
 				continue;
 
 			ip_vs_lblc_free(en);
@@ -543,23 +542,73 @@
 	.schedule =		ip_vs_lblc_schedule,
 };
 
+/*
+ *  per netns init.
+ */
+static int __net_init __ip_vs_lblc_init(struct net *net)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	if (!net_eq(net, &init_net)) {
+		ipvs->lblc_ctl_table = kmemdup(vs_vars_table,
+						sizeof(vs_vars_table),
+						GFP_KERNEL);
+		if (ipvs->lblc_ctl_table == NULL)
+			return -ENOMEM;
+	} else
+		ipvs->lblc_ctl_table = vs_vars_table;
+	ipvs->sysctl_lblc_expiration = 24*60*60*HZ;
+	ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;
+
+#ifdef CONFIG_SYSCTL
+	ipvs->lblc_ctl_header =
+		register_net_sysctl_table(net, net_vs_ctl_path,
+					  ipvs->lblc_ctl_table);
+	if (!ipvs->lblc_ctl_header) {
+		if (!net_eq(net, &init_net))
+			kfree(ipvs->lblc_ctl_table);
+		return -ENOMEM;
+	}
+#endif
+
+	return 0;
+}
+
+static void __net_exit __ip_vs_lblc_exit(struct net *net)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+#ifdef CONFIG_SYSCTL
+	unregister_net_sysctl_table(ipvs->lblc_ctl_header);
+#endif
+
+	if (!net_eq(net, &init_net))
+		kfree(ipvs->lblc_ctl_table);
+}
+
+static struct pernet_operations ip_vs_lblc_ops = {
+	.init = __ip_vs_lblc_init,
+	.exit = __ip_vs_lblc_exit,
+};
 
 static int __init ip_vs_lblc_init(void)
 {
 	int ret;
 
-	sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
+	ret = register_pernet_subsys(&ip_vs_lblc_ops);
+	if (ret)
+		return ret;
+
 	ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
 	if (ret)
-		unregister_sysctl_table(sysctl_header);
+		unregister_pernet_subsys(&ip_vs_lblc_ops);
 	return ret;
 }
 
-
 static void __exit ip_vs_lblc_cleanup(void)
 {
-	unregister_sysctl_table(sysctl_header);
 	unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
+	unregister_pernet_subsys(&ip_vs_lblc_ops);
 }
 
 
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index dbeed8e..bfa25f1 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -70,8 +70,6 @@
  *    entries that haven't been touched for a day.
  */
 #define COUNT_FOR_FULL_EXPIRATION   30
-static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
-
 
 /*
  *     for IPVS lblcr entry hash table
@@ -296,7 +294,7 @@
 static ctl_table vs_vars_table[] = {
 	{
 		.procname	= "lblcr_expiration",
-		.data		= &sysctl_ip_vs_lblcr_expiration,
+		.data		= NULL,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
@@ -304,8 +302,6 @@
 	{ }
 };
 
-static struct ctl_table_header * sysctl_header;
-
 static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
 {
 	list_del(&en->list);
@@ -425,14 +421,15 @@
 	unsigned long now = jiffies;
 	int i, j;
 	struct ip_vs_lblcr_entry *en, *nxt;
+	struct netns_ipvs *ipvs = net_ipvs(svc->net);
 
 	for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
 		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
 
 		write_lock(&svc->sched_lock);
 		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
-			if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
-				       now))
+			if (time_after(en->lastuse
+					+ ipvs->sysctl_lblcr_expiration, now))
 				continue;
 
 			ip_vs_lblcr_free(en);
@@ -664,6 +661,7 @@
 	read_lock(&svc->sched_lock);
 	en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr);
 	if (en) {
+		struct netns_ipvs *ipvs = net_ipvs(svc->net);
 		/* We only hold a read lock, but this is atomic */
 		en->lastuse = jiffies;
 
@@ -675,7 +673,7 @@
 		/* More than one destination + enough time passed by, cleanup */
 		if (atomic_read(&en->set.size) > 1 &&
 				time_after(jiffies, en->set.lastmod +
-				sysctl_ip_vs_lblcr_expiration)) {
+				ipvs->sysctl_lblcr_expiration)) {
 			struct ip_vs_dest *m;
 
 			write_lock(&en->set.lock);
@@ -744,23 +742,73 @@
 	.schedule =		ip_vs_lblcr_schedule,
 };
 
+/*
+ *  per netns init.
+ */
+static int __net_init __ip_vs_lblcr_init(struct net *net)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	if (!net_eq(net, &init_net)) {
+		ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
+						sizeof(vs_vars_table),
+						GFP_KERNEL);
+		if (ipvs->lblcr_ctl_table == NULL)
+			return -ENOMEM;
+	} else
+		ipvs->lblcr_ctl_table = vs_vars_table;
+	ipvs->sysctl_lblcr_expiration = 24*60*60*HZ;
+	ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
+
+#ifdef CONFIG_SYSCTL
+	ipvs->lblcr_ctl_header =
+		register_net_sysctl_table(net, net_vs_ctl_path,
+					  ipvs->lblcr_ctl_table);
+	if (!ipvs->lblcr_ctl_header) {
+		if (!net_eq(net, &init_net))
+			kfree(ipvs->lblcr_ctl_table);
+		return -ENOMEM;
+	}
+#endif
+
+	return 0;
+}
+
+static void __net_exit __ip_vs_lblcr_exit(struct net *net)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+#ifdef CONFIG_SYSCTL
+	unregister_net_sysctl_table(ipvs->lblcr_ctl_header);
+#endif
+
+	if (!net_eq(net, &init_net))
+		kfree(ipvs->lblcr_ctl_table);
+}
+
+static struct pernet_operations ip_vs_lblcr_ops = {
+	.init = __ip_vs_lblcr_init,
+	.exit = __ip_vs_lblcr_exit,
+};
 
 static int __init ip_vs_lblcr_init(void)
 {
 	int ret;
 
-	sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
+	ret = register_pernet_subsys(&ip_vs_lblcr_ops);
+	if (ret)
+		return ret;
+
 	ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
 	if (ret)
-		unregister_sysctl_table(sysctl_header);
+		unregister_pernet_subsys(&ip_vs_lblcr_ops);
 	return ret;
 }
 
-
 static void __exit ip_vs_lblcr_cleanup(void)
 {
-	unregister_sysctl_table(sysctl_header);
 	unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
+	unregister_pernet_subsys(&ip_vs_lblcr_ops);
 }
 
 
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index 4680647..f454c80 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -141,6 +141,7 @@
 	struct nf_conntrack_tuple *orig, new_reply;
 	struct ip_vs_conn *cp;
 	struct ip_vs_conn_param p;
+	struct net *net = nf_ct_net(ct);
 
 	if (exp->tuple.src.l3num != PF_INET)
 		return;
@@ -155,7 +156,7 @@
 
 	/* RS->CLIENT */
 	orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
-	ip_vs_conn_fill_param(exp->tuple.src.l3num, orig->dst.protonum,
+	ip_vs_conn_fill_param(net, exp->tuple.src.l3num, orig->dst.protonum,
 			      &orig->src.u3, orig->src.u.tcp.port,
 			      &orig->dst.u3, orig->dst.u.tcp.port, &p);
 	cp = ip_vs_conn_out_get(&p);
@@ -268,7 +269,8 @@
 		" for conn " FMT_CONN "\n",
 		__func__, ARG_TUPLE(&tuple), ARG_CONN(cp));
 
-	h = nf_conntrack_find_get(&init_net, NF_CT_DEFAULT_ZONE, &tuple);
+	h = nf_conntrack_find_get(ip_vs_conn_net(cp), NF_CT_DEFAULT_ZONE,
+				  &tuple);
 	if (h) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
 		/* Show what happens instead of calling nf_ct_kill() */
diff --git a/net/netfilter/ipvs/ip_vs_pe.c b/net/netfilter/ipvs/ip_vs_pe.c
index 3414af7..5cf859c 100644
--- a/net/netfilter/ipvs/ip_vs_pe.c
+++ b/net/netfilter/ipvs/ip_vs_pe.c
@@ -29,12 +29,11 @@
 }
 
 /* Get pe in the pe list by name */
-static struct ip_vs_pe *
-ip_vs_pe_getbyname(const char *pe_name)
+struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name)
 {
 	struct ip_vs_pe *pe;
 
-	IP_VS_DBG(2, "%s(): pe_name \"%s\"\n", __func__,
+	IP_VS_DBG(10, "%s(): pe_name \"%s\"\n", __func__,
 		  pe_name);
 
 	spin_lock_bh(&ip_vs_pe_lock);
@@ -60,28 +59,22 @@
 }
 
 /* Lookup pe and try to load it if it doesn't exist */
-struct ip_vs_pe *ip_vs_pe_get(const char *name)
+struct ip_vs_pe *ip_vs_pe_getbyname(const char *name)
 {
 	struct ip_vs_pe *pe;
 
 	/* Search for the pe by name */
-	pe = ip_vs_pe_getbyname(name);
+	pe = __ip_vs_pe_getbyname(name);
 
 	/* If pe not found, load the module and search again */
 	if (!pe) {
 		request_module("ip_vs_pe_%s", name);
-		pe = ip_vs_pe_getbyname(name);
+		pe = __ip_vs_pe_getbyname(name);
 	}
 
 	return pe;
 }
 
-void ip_vs_pe_put(struct ip_vs_pe *pe)
-{
-	if (pe && pe->module)
-		module_put(pe->module);
-}
-
 /* Register a pe in the pe list */
 int register_ip_vs_pe(struct ip_vs_pe *pe)
 {
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index b8b4e96..0d83bc0 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -71,6 +71,7 @@
 	struct ip_vs_iphdr iph;
 	unsigned int dataoff, datalen, matchoff, matchlen;
 	const char *dptr;
+	int retc;
 
 	ip_vs_fill_iphdr(p->af, skb_network_header(skb), &iph);
 
@@ -83,6 +84,8 @@
 	if (dataoff >= skb->len)
 		return -EINVAL;
 
+	if ((retc = skb_linearize(skb)) < 0)
+		return retc;
 	dptr = skb->data + dataoff;
 	datalen = skb->len - dataoff;
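
The skb_linearize() added above is needed because the SIP payload is parsed
through a flat pointer (dptr = skb->data + dataoff), which is only valid when
all of the data sits in the linear area rather than in paged fragments. The
generic guard pattern, shown here for illustration only:

	/* Make sure the bytes we are about to parse are linearly mapped;
	 * skb_linearize() returns a negative errno (-ENOMEM) on failure. */
	if (skb_is_nonlinear(skb) && skb_linearize(skb) < 0)
		return -ENOMEM;
	dptr = skb->data + dataoff;
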
 
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index c539983..17484a4 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -60,6 +60,35 @@
 	return 0;
 }
 
+#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \
+    defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \
+    defined(CONFIG_IP_VS_PROTO_ESP)
+/*
+ *	register an ipvs protocols netns related data
+ */
+static int
+register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
+	struct ip_vs_proto_data *pd =
+			kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC);
+
+	if (!pd) {
+		pr_err("%s(): no memory.\n", __func__);
+		return -ENOMEM;
+	}
+	pd->pp = pp;	/* For speed issues */
+	pd->next = ipvs->proto_data_table[hash];
+	ipvs->proto_data_table[hash] = pd;
+	atomic_set(&pd->appcnt, 0);	/* Init app counter */
+
+	if (pp->init_netns != NULL)
+		pp->init_netns(net, pd);
+
+	return 0;
+}
+#endif
 
 /*
  *	unregister an ipvs protocol
@@ -82,6 +111,29 @@
 	return -ESRCH;
 }
 
+/*
+ *	unregister an ipvs protocols netns data
+ */
+static int
+unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct ip_vs_proto_data **pd_p;
+	unsigned hash = IP_VS_PROTO_HASH(pd->pp->protocol);
+
+	pd_p = &ipvs->proto_data_table[hash];
+	for (; *pd_p; pd_p = &(*pd_p)->next) {
+		if (*pd_p == pd) {
+			*pd_p = pd->next;
+			if (pd->pp->exit_netns != NULL)
+				pd->pp->exit_netns(net, pd);
+			kfree(pd);
+			return 0;
+		}
+	}
+
+	return -ESRCH;
+}
 
 /*
  *	get ip_vs_protocol object by its proto.
@@ -100,19 +152,44 @@
 }
 EXPORT_SYMBOL(ip_vs_proto_get);
 
+/*
+ *	get ip_vs_protocol object data by netns and proto
+ */
+struct ip_vs_proto_data *
+__ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
+{
+	struct ip_vs_proto_data *pd;
+	unsigned hash = IP_VS_PROTO_HASH(proto);
+
+	for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) {
+		if (pd->pp->protocol == proto)
+			return pd;
+	}
+
+	return NULL;
+}
+
+struct ip_vs_proto_data *
+ip_vs_proto_data_get(struct net *net, unsigned short proto)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	return __ipvs_proto_data_get(ipvs, proto);
+}
+EXPORT_SYMBOL(ip_vs_proto_data_get);
 
 /*
  *	Propagate event for state change to all protocols
  */
-void ip_vs_protocol_timeout_change(int flags)
+void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags)
 {
-	struct ip_vs_protocol *pp;
+	struct ip_vs_proto_data *pd;
 	int i;
 
 	for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
-		for (pp = ip_vs_proto_table[i]; pp; pp = pp->next) {
-			if (pp->timeout_change)
-				pp->timeout_change(pp, flags);
+		for (pd = ipvs->proto_data_table[i]; pd; pd = pd->next) {
+			if (pd->pp->timeout_change)
+				pd->pp->timeout_change(pd, flags);
 		}
 	}
 }
@@ -236,6 +313,46 @@
 		ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg);
 }
 
+/*
+ * per network name-space init
+ */
+static int __net_init __ip_vs_protocol_init(struct net *net)
+{
+#ifdef CONFIG_IP_VS_PROTO_TCP
+	register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_UDP
+	register_ip_vs_proto_netns(net, &ip_vs_protocol_udp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_SCTP
+	register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_AH
+	register_ip_vs_proto_netns(net, &ip_vs_protocol_ah);
+#endif
+#ifdef CONFIG_IP_VS_PROTO_ESP
+	register_ip_vs_proto_netns(net, &ip_vs_protocol_esp);
+#endif
+	return 0;
+}
+
+static void __net_exit __ip_vs_protocol_cleanup(struct net *net)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct ip_vs_proto_data *pd;
+	int i;
+
+	/* unregister all the ipvs proto data for this netns */
+	for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
+		while ((pd = ipvs->proto_data_table[i]) != NULL)
+			unregister_ip_vs_proto_netns(net, pd);
+	}
+}
+
+static struct pernet_operations ipvs_proto_ops = {
+	.init = __ip_vs_protocol_init,
+	.exit = __ip_vs_protocol_cleanup,
+};
 
 int __init ip_vs_protocol_init(void)
 {
@@ -265,6 +382,7 @@
 	REGISTER_PROTOCOL(&ip_vs_protocol_esp);
 #endif
 	pr_info("Registered protocols (%s)\n", &protocols[2]);
+	return register_pernet_subsys(&ipvs_proto_ops);
 
 	return 0;
 }
@@ -275,6 +393,7 @@
 	struct ip_vs_protocol *pp;
 	int i;
 
+	unregister_pernet_subsys(&ipvs_proto_ops);
 	/* unregister all the ipvs protocols */
 	for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
 		while ((pp = ip_vs_proto_table[i]) != NULL)
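
Callers that previously reached timeouts and app counters through the global
struct ip_vs_protocol now go through the per-netns ip_vs_proto_data introduced
above. A typical lookup, illustrative only (field uses assume the per-netns
timeout table set up by the init_netns hooks below):

	/* Fetch the per-netns TCP data for a given struct net */
	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);

	if (pd)
		cp->timeout = pd->timeout_table[IP_VS_TCP_S_ESTABLISHED];
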
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
index 3a04611..5b8eb8b 100644
--- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
@@ -41,28 +41,30 @@
 #define PORT_ISAKMP	500
 
 static void
-ah_esp_conn_fill_param_proto(int af, const struct ip_vs_iphdr *iph,
-			     int inverse, struct ip_vs_conn_param *p)
+ah_esp_conn_fill_param_proto(struct net *net, int af,
+			     const struct ip_vs_iphdr *iph, int inverse,
+			     struct ip_vs_conn_param *p)
 {
 	if (likely(!inverse))
-		ip_vs_conn_fill_param(af, IPPROTO_UDP,
+		ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
 				      &iph->saddr, htons(PORT_ISAKMP),
 				      &iph->daddr, htons(PORT_ISAKMP), p);
 	else
-		ip_vs_conn_fill_param(af, IPPROTO_UDP,
+		ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
 				      &iph->daddr, htons(PORT_ISAKMP),
 				      &iph->saddr, htons(PORT_ISAKMP), p);
 }
 
 static struct ip_vs_conn *
-ah_esp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
+ah_esp_conn_in_get(int af, const struct sk_buff *skb,
 		   const struct ip_vs_iphdr *iph, unsigned int proto_off,
 		   int inverse)
 {
 	struct ip_vs_conn *cp;
 	struct ip_vs_conn_param p;
+	struct net *net = skb_net(skb);
 
-	ah_esp_conn_fill_param_proto(af, iph, inverse, &p);
+	ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p);
 	cp = ip_vs_conn_in_get(&p);
 	if (!cp) {
 		/*
@@ -72,7 +74,7 @@
 		IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for outin packet "
 			      "%s%s %s->%s\n",
 			      inverse ? "ICMP+" : "",
-			      pp->name,
+			      ip_vs_proto_get(iph->protocol)->name,
 			      IP_VS_DBG_ADDR(af, &iph->saddr),
 			      IP_VS_DBG_ADDR(af, &iph->daddr));
 	}
@@ -83,21 +85,21 @@
 
 static struct ip_vs_conn *
 ah_esp_conn_out_get(int af, const struct sk_buff *skb,
-		    struct ip_vs_protocol *pp,
 		    const struct ip_vs_iphdr *iph,
 		    unsigned int proto_off,
 		    int inverse)
 {
 	struct ip_vs_conn *cp;
 	struct ip_vs_conn_param p;
+	struct net *net = skb_net(skb);
 
-	ah_esp_conn_fill_param_proto(af, iph, inverse, &p);
+	ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p);
 	cp = ip_vs_conn_out_get(&p);
 	if (!cp) {
 		IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet "
 			      "%s%s %s->%s\n",
 			      inverse ? "ICMP+" : "",
-			      pp->name,
+			      ip_vs_proto_get(iph->protocol)->name,
 			      IP_VS_DBG_ADDR(af, &iph->saddr),
 			      IP_VS_DBG_ADDR(af, &iph->daddr));
 	}
@@ -107,7 +109,7 @@
 
 
 static int
-ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
 		     int *verdict, struct ip_vs_conn **cpp)
 {
 	/*
@@ -117,26 +119,14 @@
 	return 0;
 }
 
-static void ah_esp_init(struct ip_vs_protocol *pp)
-{
-	/* nothing to do now */
-}
-
-
-static void ah_esp_exit(struct ip_vs_protocol *pp)
-{
-	/* nothing to do now */
-}
-
-
 #ifdef CONFIG_IP_VS_PROTO_AH
 struct ip_vs_protocol ip_vs_protocol_ah = {
 	.name =			"AH",
 	.protocol =		IPPROTO_AH,
 	.num_states =		1,
 	.dont_defrag =		1,
-	.init =			ah_esp_init,
-	.exit =			ah_esp_exit,
+	.init =			NULL,
+	.exit =			NULL,
 	.conn_schedule =	ah_esp_conn_schedule,
 	.conn_in_get =		ah_esp_conn_in_get,
 	.conn_out_get =		ah_esp_conn_out_get,
@@ -149,7 +139,6 @@
 	.app_conn_bind =	NULL,
 	.debug_packet =		ip_vs_tcpudp_debug_packet,
 	.timeout_change =	NULL,		/* ISAKMP */
-	.set_state_timeout =	NULL,
 };
 #endif
 
@@ -159,8 +148,8 @@
 	.protocol =		IPPROTO_ESP,
 	.num_states =		1,
 	.dont_defrag =		1,
-	.init =			ah_esp_init,
-	.exit =			ah_esp_exit,
+	.init =			NULL,
+	.exit =			NULL,
 	.conn_schedule =	ah_esp_conn_schedule,
 	.conn_in_get =		ah_esp_conn_in_get,
 	.conn_out_get =		ah_esp_conn_out_get,
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 1ea96bcd..b027ccc 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -9,9 +9,10 @@
 #include <net/ip_vs.h>
 
 static int
-sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
 		   int *verdict, struct ip_vs_conn **cpp)
 {
+	struct net *net;
 	struct ip_vs_service *svc;
 	sctp_chunkhdr_t _schunkh, *sch;
 	sctp_sctphdr_t *sh, _sctph;
@@ -27,13 +28,13 @@
 				 sizeof(_schunkh), &_schunkh);
 	if (sch == NULL)
 		return 0;
-
+	net = skb_net(skb);
 	if ((sch->type == SCTP_CID_INIT) &&
-	    (svc = ip_vs_service_get(af, skb->mark, iph.protocol,
+	    (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
 				     &iph.daddr, sh->dest))) {
 		int ignored;
 
-		if (ip_vs_todrop()) {
+		if (ip_vs_todrop(net_ipvs(net))) {
 			/*
 			 * It seems that we are very loaded.
 			 * We have to drop this packet :(
@@ -46,14 +47,19 @@
 		 * Let the virtual server select a real server for the
 		 * incoming connection, and create a connection entry.
 		 */
-		*cpp = ip_vs_schedule(svc, skb, pp, &ignored);
-		if (!*cpp && !ignored) {
-			*verdict = ip_vs_leave(svc, skb, pp);
+		*cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+		if (!*cpp && ignored <= 0) {
+			if (!ignored)
+				*verdict = ip_vs_leave(svc, skb, pd);
+			else {
+				ip_vs_service_put(svc);
+				*verdict = NF_DROP;
+			}
 			return 0;
 		}
 		ip_vs_service_put(svc);
 	}
-
+	/* NF_ACCEPT */
 	return 1;
 }
 
@@ -856,7 +862,7 @@
 /*
  *      Timeout table[state]
  */
-static int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
+static const int sctp_timeouts[IP_VS_SCTP_S_LAST + 1] = {
 	[IP_VS_SCTP_S_NONE]         =     2 * HZ,
 	[IP_VS_SCTP_S_INIT_CLI]     =     1 * 60 * HZ,
 	[IP_VS_SCTP_S_INIT_SER]     =     1 * 60 * HZ,
@@ -900,20 +906,8 @@
 	return "?";
 }
 
-static void sctp_timeout_change(struct ip_vs_protocol *pp, int flags)
-{
-}
-
-static int
-sctp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
-
-return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_SCTP_S_LAST,
-				sctp_state_name_table, sname, to);
-}
-
 static inline int
-set_sctp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
+set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
 		int direction, const struct sk_buff *skb)
 {
 	sctp_chunkhdr_t _sctpch, *sch;
@@ -971,7 +965,7 @@
 
 		IP_VS_DBG_BUF(8, "%s %s  %s:%d->"
 				"%s:%d state: %s->%s conn->refcnt:%d\n",
-				pp->name,
+				pd->pp->name,
 				((direction == IP_VS_DIR_OUTPUT) ?
 				 "output " : "input "),
 				IP_VS_DBG_ADDR(cp->af, &cp->daddr),
@@ -995,75 +989,73 @@
 			}
 		}
 	}
+	if (likely(pd))
+		cp->timeout = pd->timeout_table[cp->state = next_state];
+	else	/* What to do ? */
+		cp->timeout = sctp_timeouts[cp->state = next_state];
 
-	 cp->timeout = pp->timeout_table[cp->state = next_state];
-
-	 return 1;
+	return 1;
 }
 
 static int
 sctp_state_transition(struct ip_vs_conn *cp, int direction,
-		const struct sk_buff *skb, struct ip_vs_protocol *pp)
+		const struct sk_buff *skb, struct ip_vs_proto_data *pd)
 {
 	int ret = 0;
 
 	spin_lock(&cp->lock);
-	ret = set_sctp_state(pp, cp, direction, skb);
+	ret = set_sctp_state(pd, cp, direction, skb);
 	spin_unlock(&cp->lock);
 
 	return ret;
 }
 
-/*
- *      Hash table for SCTP application incarnations
- */
-#define SCTP_APP_TAB_BITS        4
-#define SCTP_APP_TAB_SIZE        (1 << SCTP_APP_TAB_BITS)
-#define SCTP_APP_TAB_MASK        (SCTP_APP_TAB_SIZE - 1)
-
-static struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(sctp_app_lock);
-
 static inline __u16 sctp_app_hashkey(__be16 port)
 {
 	return (((__force u16)port >> SCTP_APP_TAB_BITS) ^ (__force u16)port)
 		& SCTP_APP_TAB_MASK;
 }
 
-static int sctp_register_app(struct ip_vs_app *inc)
+static int sctp_register_app(struct net *net, struct ip_vs_app *inc)
 {
 	struct ip_vs_app *i;
 	__u16 hash;
 	__be16 port = inc->port;
 	int ret = 0;
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
 
 	hash = sctp_app_hashkey(port);
 
-	spin_lock_bh(&sctp_app_lock);
-	list_for_each_entry(i, &sctp_apps[hash], p_list) {
+	spin_lock_bh(&ipvs->sctp_app_lock);
+	list_for_each_entry(i, &ipvs->sctp_apps[hash], p_list) {
 		if (i->port == port) {
 			ret = -EEXIST;
 			goto out;
 		}
 	}
-	list_add(&inc->p_list, &sctp_apps[hash]);
-	atomic_inc(&ip_vs_protocol_sctp.appcnt);
+	list_add(&inc->p_list, &ipvs->sctp_apps[hash]);
+	atomic_inc(&pd->appcnt);
 out:
-	spin_unlock_bh(&sctp_app_lock);
+	spin_unlock_bh(&ipvs->sctp_app_lock);
 
 	return ret;
 }
 
-static void sctp_unregister_app(struct ip_vs_app *inc)
+static void sctp_unregister_app(struct net *net, struct ip_vs_app *inc)
 {
-	spin_lock_bh(&sctp_app_lock);
-	atomic_dec(&ip_vs_protocol_sctp.appcnt);
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
+
+	spin_lock_bh(&ipvs->sctp_app_lock);
+	atomic_dec(&pd->appcnt);
 	list_del(&inc->p_list);
-	spin_unlock_bh(&sctp_app_lock);
+	spin_unlock_bh(&ipvs->sctp_app_lock);
 }
 
 static int sctp_app_conn_bind(struct ip_vs_conn *cp)
 {
+	struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
 	int hash;
 	struct ip_vs_app *inc;
 	int result = 0;
@@ -1074,12 +1066,12 @@
 	/* Lookup application incarnations and bind the right one */
 	hash = sctp_app_hashkey(cp->vport);
 
-	spin_lock(&sctp_app_lock);
-	list_for_each_entry(inc, &sctp_apps[hash], p_list) {
+	spin_lock(&ipvs->sctp_app_lock);
+	list_for_each_entry(inc, &ipvs->sctp_apps[hash], p_list) {
 		if (inc->port == cp->vport) {
 			if (unlikely(!ip_vs_app_inc_get(inc)))
 				break;
-			spin_unlock(&sctp_app_lock);
+			spin_unlock(&ipvs->sctp_app_lock);
 
 			IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
 					"%s:%u to app %s on port %u\n",
@@ -1095,43 +1087,50 @@
 			goto out;
 		}
 	}
-	spin_unlock(&sctp_app_lock);
+	spin_unlock(&ipvs->sctp_app_lock);
 out:
 	return result;
 }
 
-static void ip_vs_sctp_init(struct ip_vs_protocol *pp)
+/* ---------------------------------------------
+ *   timeouts are netns related now.
+ * ---------------------------------------------
+ */
+static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
-	IP_VS_INIT_HASH_TABLE(sctp_apps);
-	pp->timeout_table = sctp_timeouts;
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	ip_vs_init_hash_table(ipvs->sctp_apps, SCTP_APP_TAB_SIZE);
+	spin_lock_init(&ipvs->sctp_app_lock);
+	pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
+							sizeof(sctp_timeouts));
 }
 
-
-static void ip_vs_sctp_exit(struct ip_vs_protocol *pp)
+static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd)
 {
-
+	kfree(pd->timeout_table);
 }
 
 struct ip_vs_protocol ip_vs_protocol_sctp = {
-	.name = "SCTP",
-	.protocol = IPPROTO_SCTP,
-	.num_states = IP_VS_SCTP_S_LAST,
-	.dont_defrag = 0,
-	.appcnt = ATOMIC_INIT(0),
-	.init = ip_vs_sctp_init,
-	.exit = ip_vs_sctp_exit,
-	.register_app = sctp_register_app,
+	.name		= "SCTP",
+	.protocol	= IPPROTO_SCTP,
+	.num_states	= IP_VS_SCTP_S_LAST,
+	.dont_defrag	= 0,
+	.init		= NULL,
+	.exit		= NULL,
+	.init_netns	= __ip_vs_sctp_init,
+	.exit_netns	= __ip_vs_sctp_exit,
+	.register_app	= sctp_register_app,
 	.unregister_app = sctp_unregister_app,
-	.conn_schedule = sctp_conn_schedule,
-	.conn_in_get = ip_vs_conn_in_get_proto,
-	.conn_out_get = ip_vs_conn_out_get_proto,
-	.snat_handler = sctp_snat_handler,
-	.dnat_handler = sctp_dnat_handler,
-	.csum_check = sctp_csum_check,
-	.state_name = sctp_state_name,
+	.conn_schedule	= sctp_conn_schedule,
+	.conn_in_get	= ip_vs_conn_in_get_proto,
+	.conn_out_get	= ip_vs_conn_out_get_proto,
+	.snat_handler	= sctp_snat_handler,
+	.dnat_handler	= sctp_dnat_handler,
+	.csum_check	= sctp_csum_check,
+	.state_name	= sctp_state_name,
 	.state_transition = sctp_state_transition,
-	.app_conn_bind = sctp_app_conn_bind,
-	.debug_packet = ip_vs_tcpudp_debug_packet,
-	.timeout_change = sctp_timeout_change,
-	.set_state_timeout = sctp_set_state_timeout,
+	.app_conn_bind	= sctp_app_conn_bind,
+	.debug_packet	= ip_vs_tcpudp_debug_packet,
+	.timeout_change	= NULL,
 };
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index f6c5200..c0cc341 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -9,8 +9,12 @@
  *              as published by the Free Software Foundation; either version
  *              2 of the License, or (at your option) any later version.
  *
- * Changes:
+ * Changes:     Hans Schillstrom <hans.schillstrom@ericsson.com>
  *
+ *              Network name space (netns) aware.
+ *              Global data moved to netns i.e. struct netns_ipvs
+ *              The tcp_timeouts table has a copy per netns, kept in the
+ *              per-protocol ip_vs_proto_data and handled per netns.
  */
 
 #define KMSG_COMPONENT "IPVS"
@@ -28,9 +32,10 @@
 #include <net/ip_vs.h>
 
 static int
-tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
 		  int *verdict, struct ip_vs_conn **cpp)
 {
+	struct net *net;
 	struct ip_vs_service *svc;
 	struct tcphdr _tcph, *th;
 	struct ip_vs_iphdr iph;
@@ -42,14 +47,14 @@
 		*verdict = NF_DROP;
 		return 0;
 	}
-
+	net = skb_net(skb);
 	/* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */
 	if (th->syn &&
-	    (svc = ip_vs_service_get(af, skb->mark, iph.protocol, &iph.daddr,
-				     th->dest))) {
+	    (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
+				     &iph.daddr, th->dest))) {
 		int ignored;
 
-		if (ip_vs_todrop()) {
+		if (ip_vs_todrop(net_ipvs(net))) {
 			/*
 			 * It seems that we are very loaded.
 			 * We have to drop this packet :(
@@ -63,13 +68,19 @@
 		 * Let the virtual server select a real server for the
 		 * incoming connection, and create a connection entry.
 		 */
-		*cpp = ip_vs_schedule(svc, skb, pp, &ignored);
-		if (!*cpp && !ignored) {
-			*verdict = ip_vs_leave(svc, skb, pp);
+		*cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+		if (!*cpp && ignored <= 0) {
+			if (!ignored)
+				*verdict = ip_vs_leave(svc, skb, pd);
+			else {
+				ip_vs_service_put(svc);
+				*verdict = NF_DROP;
+			}
 			return 0;
 		}
 		ip_vs_service_put(svc);
 	}
+	/* NF_ACCEPT */
 	return 1;
 }
 
@@ -338,7 +349,7 @@
 /*
  *	Timeout table[state]
  */
-static int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
+static const int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
 	[IP_VS_TCP_S_NONE]		=	2*HZ,
 	[IP_VS_TCP_S_ESTABLISHED]	=	15*60*HZ,
 	[IP_VS_TCP_S_SYN_SENT]		=	2*60*HZ,
@@ -437,10 +448,7 @@
 /*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
 };
 
-static struct tcp_states_t *tcp_state_table = tcp_states;
-
-
-static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags)
+static void tcp_timeout_change(struct ip_vs_proto_data *pd, int flags)
 {
 	int on = (flags & 1);		/* secure_tcp */
 
@@ -450,14 +458,7 @@
 	** for most if not for all of the applications. Something
 	** like "capabilities" (flags) for each object.
 	*/
-	tcp_state_table = (on? tcp_states_dos : tcp_states);
-}
-
-static int
-tcp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
-	return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_TCP_S_LAST,
-				       tcp_state_name_table, sname, to);
+	pd->tcp_state_table = (on ? tcp_states_dos : tcp_states);
 }
 
 static inline int tcp_state_idx(struct tcphdr *th)
@@ -474,7 +475,7 @@
 }
 
 static inline void
-set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
+set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
 	      int direction, struct tcphdr *th)
 {
 	int state_idx;
@@ -497,7 +498,8 @@
 		goto tcp_state_out;
 	}
 
-	new_state = tcp_state_table[state_off+state_idx].next_state[cp->state];
+	new_state =
+		pd->tcp_state_table[state_off+state_idx].next_state[cp->state];
 
   tcp_state_out:
 	if (new_state != cp->state) {
@@ -505,7 +507,7 @@
 
 		IP_VS_DBG_BUF(8, "%s %s [%c%c%c%c] %s:%d->"
 			      "%s:%d state: %s->%s conn->refcnt:%d\n",
-			      pp->name,
+			      pd->pp->name,
 			      ((state_off == TCP_DIR_OUTPUT) ?
 			       "output " : "input "),
 			      th->syn ? 'S' : '.',
@@ -535,17 +537,19 @@
 		}
 	}
 
-	cp->timeout = pp->timeout_table[cp->state = new_state];
+	if (likely(pd))
+		cp->timeout = pd->timeout_table[cp->state = new_state];
+	else	/* What to do ? */
+		cp->timeout = tcp_timeouts[cp->state = new_state];
 }
 
-
 /*
  *	Handle state transitions
  */
 static int
 tcp_state_transition(struct ip_vs_conn *cp, int direction,
 		     const struct sk_buff *skb,
-		     struct ip_vs_protocol *pp)
+		     struct ip_vs_proto_data *pd)
 {
 	struct tcphdr _tcph, *th;
 
@@ -560,23 +564,12 @@
 		return 0;
 
 	spin_lock(&cp->lock);
-	set_tcp_state(pp, cp, direction, th);
+	set_tcp_state(pd, cp, direction, th);
 	spin_unlock(&cp->lock);
 
 	return 1;
 }
 
-
-/*
- *	Hash table for TCP application incarnations
- */
-#define	TCP_APP_TAB_BITS	4
-#define	TCP_APP_TAB_SIZE	(1 << TCP_APP_TAB_BITS)
-#define	TCP_APP_TAB_MASK	(TCP_APP_TAB_SIZE - 1)
-
-static struct list_head tcp_apps[TCP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(tcp_app_lock);
-
 static inline __u16 tcp_app_hashkey(__be16 port)
 {
 	return (((__force u16)port >> TCP_APP_TAB_BITS) ^ (__force u16)port)
@@ -584,44 +577,50 @@
 }
 
 
-static int tcp_register_app(struct ip_vs_app *inc)
+static int tcp_register_app(struct net *net, struct ip_vs_app *inc)
 {
 	struct ip_vs_app *i;
 	__u16 hash;
 	__be16 port = inc->port;
 	int ret = 0;
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
 
 	hash = tcp_app_hashkey(port);
 
-	spin_lock_bh(&tcp_app_lock);
-	list_for_each_entry(i, &tcp_apps[hash], p_list) {
+	spin_lock_bh(&ipvs->tcp_app_lock);
+	list_for_each_entry(i, &ipvs->tcp_apps[hash], p_list) {
 		if (i->port == port) {
 			ret = -EEXIST;
 			goto out;
 		}
 	}
-	list_add(&inc->p_list, &tcp_apps[hash]);
-	atomic_inc(&ip_vs_protocol_tcp.appcnt);
+	list_add(&inc->p_list, &ipvs->tcp_apps[hash]);
+	atomic_inc(&pd->appcnt);
 
   out:
-	spin_unlock_bh(&tcp_app_lock);
+	spin_unlock_bh(&ipvs->tcp_app_lock);
 	return ret;
 }
 
 
 static void
-tcp_unregister_app(struct ip_vs_app *inc)
+tcp_unregister_app(struct net *net, struct ip_vs_app *inc)
 {
-	spin_lock_bh(&tcp_app_lock);
-	atomic_dec(&ip_vs_protocol_tcp.appcnt);
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+
+	spin_lock_bh(&ipvs->tcp_app_lock);
+	atomic_dec(&pd->appcnt);
 	list_del(&inc->p_list);
-	spin_unlock_bh(&tcp_app_lock);
+	spin_unlock_bh(&ipvs->tcp_app_lock);
 }
 
 
 static int
 tcp_app_conn_bind(struct ip_vs_conn *cp)
 {
+	struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
 	int hash;
 	struct ip_vs_app *inc;
 	int result = 0;
@@ -633,12 +632,12 @@
 	/* Lookup application incarnations and bind the right one */
 	hash = tcp_app_hashkey(cp->vport);
 
-	spin_lock(&tcp_app_lock);
-	list_for_each_entry(inc, &tcp_apps[hash], p_list) {
+	spin_lock(&ipvs->tcp_app_lock);
+	list_for_each_entry(inc, &ipvs->tcp_apps[hash], p_list) {
 		if (inc->port == cp->vport) {
 			if (unlikely(!ip_vs_app_inc_get(inc)))
 				break;
-			spin_unlock(&tcp_app_lock);
+			spin_unlock(&ipvs->tcp_app_lock);
 
 			IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
 				      "%s:%u to app %s on port %u\n",
@@ -655,7 +654,7 @@
 			goto out;
 		}
 	}
-	spin_unlock(&tcp_app_lock);
+	spin_unlock(&ipvs->tcp_app_lock);
 
   out:
 	return result;
@@ -665,24 +664,35 @@
 /*
  *	Set LISTEN timeout. (ip_vs_conn_put will setup timer)
  */
-void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp)
+void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
 {
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
+
 	spin_lock(&cp->lock);
 	cp->state = IP_VS_TCP_S_LISTEN;
-	cp->timeout = ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_LISTEN];
+	cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN]
+			   : tcp_timeouts[IP_VS_TCP_S_LISTEN]);
 	spin_unlock(&cp->lock);
 }
 
-
-static void ip_vs_tcp_init(struct ip_vs_protocol *pp)
+/* ---------------------------------------------
+ *   timeouts are netns related now.
+ * ---------------------------------------------
+ */
+static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
-	IP_VS_INIT_HASH_TABLE(tcp_apps);
-	pp->timeout_table = tcp_timeouts;
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE);
+	spin_lock_init(&ipvs->tcp_app_lock);
+	pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
+							sizeof(tcp_timeouts));
+	pd->tcp_state_table =  tcp_states;
 }
 
-
-static void ip_vs_tcp_exit(struct ip_vs_protocol *pp)
+static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd)
 {
+	kfree(pd->timeout_table);
 }
 
 
@@ -691,9 +701,10 @@
 	.protocol =		IPPROTO_TCP,
 	.num_states =		IP_VS_TCP_S_LAST,
 	.dont_defrag =		0,
-	.appcnt =		ATOMIC_INIT(0),
-	.init =			ip_vs_tcp_init,
-	.exit =			ip_vs_tcp_exit,
+	.init =			NULL,
+	.exit =			NULL,
+	.init_netns =		__ip_vs_tcp_init,
+	.exit_netns =		__ip_vs_tcp_exit,
 	.register_app =		tcp_register_app,
 	.unregister_app =	tcp_unregister_app,
 	.conn_schedule =	tcp_conn_schedule,
@@ -707,5 +718,4 @@
 	.app_conn_bind =	tcp_app_conn_bind,
 	.debug_packet =		ip_vs_tcpudp_debug_packet,
 	.timeout_change =	tcp_timeout_change,
-	.set_state_timeout =	tcp_set_state_timeout,
 };
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index 9d106a0..f1282cb 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -9,7 +9,8 @@
  *              as published by the Free Software Foundation; either version
  *              2 of the License, or (at your option) any later version.
  *
- * Changes:
+ * Changes:     Hans Schillstrom <hans.schillstrom@ericsson.com>
+ *              Network name space (netns) aware.
  *
  */
 
@@ -28,9 +29,10 @@
 #include <net/ip6_checksum.h>
 
 static int
-udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
+udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
 		  int *verdict, struct ip_vs_conn **cpp)
 {
+	struct net *net;
 	struct ip_vs_service *svc;
 	struct udphdr _udph, *uh;
 	struct ip_vs_iphdr iph;
@@ -42,13 +44,13 @@
 		*verdict = NF_DROP;
 		return 0;
 	}
-
-	svc = ip_vs_service_get(af, skb->mark, iph.protocol,
+	net = skb_net(skb);
+	svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
 				&iph.daddr, uh->dest);
 	if (svc) {
 		int ignored;
 
-		if (ip_vs_todrop()) {
+		if (ip_vs_todrop(net_ipvs(net))) {
 			/*
 			 * It seems that we are very loaded.
 			 * We have to drop this packet :(
@@ -62,13 +64,19 @@
 		 * Let the virtual server select a real server for the
 		 * incoming connection, and create a connection entry.
 		 */
-		*cpp = ip_vs_schedule(svc, skb, pp, &ignored);
-		if (!*cpp && !ignored) {
-			*verdict = ip_vs_leave(svc, skb, pp);
+		*cpp = ip_vs_schedule(svc, skb, pd, &ignored);
+		if (!*cpp && ignored <= 0) {
+			if (!ignored)
+				*verdict = ip_vs_leave(svc, skb, pd);
+			else {
+				ip_vs_service_put(svc);
+				*verdict = NF_DROP;
+			}
 			return 0;
 		}
 		ip_vs_service_put(svc);
 	}
+	/* NF_ACCEPT */
 	return 1;
 }
 
@@ -338,19 +346,6 @@
 	return 1;
 }
 
-
-/*
- *	Note: the caller guarantees that only one of register_app,
- *	unregister_app or app_conn_bind is called each time.
- */
-
-#define	UDP_APP_TAB_BITS	4
-#define	UDP_APP_TAB_SIZE	(1 << UDP_APP_TAB_BITS)
-#define	UDP_APP_TAB_MASK	(UDP_APP_TAB_SIZE - 1)
-
-static struct list_head udp_apps[UDP_APP_TAB_SIZE];
-static DEFINE_SPINLOCK(udp_app_lock);
-
 static inline __u16 udp_app_hashkey(__be16 port)
 {
 	return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port)
@@ -358,44 +353,50 @@
 }
 
 
-static int udp_register_app(struct ip_vs_app *inc)
+static int udp_register_app(struct net *net, struct ip_vs_app *inc)
 {
 	struct ip_vs_app *i;
 	__u16 hash;
 	__be16 port = inc->port;
 	int ret = 0;
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
 
 	hash = udp_app_hashkey(port);
 
 
-	spin_lock_bh(&udp_app_lock);
-	list_for_each_entry(i, &udp_apps[hash], p_list) {
+	spin_lock_bh(&ipvs->udp_app_lock);
+	list_for_each_entry(i, &ipvs->udp_apps[hash], p_list) {
 		if (i->port == port) {
 			ret = -EEXIST;
 			goto out;
 		}
 	}
-	list_add(&inc->p_list, &udp_apps[hash]);
-	atomic_inc(&ip_vs_protocol_udp.appcnt);
+	list_add(&inc->p_list, &ipvs->udp_apps[hash]);
+	atomic_inc(&pd->appcnt);
 
   out:
-	spin_unlock_bh(&udp_app_lock);
+	spin_unlock_bh(&ipvs->udp_app_lock);
 	return ret;
 }
 
 
 static void
-udp_unregister_app(struct ip_vs_app *inc)
+udp_unregister_app(struct net *net, struct ip_vs_app *inc)
 {
-	spin_lock_bh(&udp_app_lock);
-	atomic_dec(&ip_vs_protocol_udp.appcnt);
+	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	spin_lock_bh(&ipvs->udp_app_lock);
+	atomic_dec(&pd->appcnt);
 	list_del(&inc->p_list);
-	spin_unlock_bh(&udp_app_lock);
+	spin_unlock_bh(&ipvs->udp_app_lock);
 }
 
 
 static int udp_app_conn_bind(struct ip_vs_conn *cp)
 {
+	struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
 	int hash;
 	struct ip_vs_app *inc;
 	int result = 0;
@@ -407,12 +408,12 @@
 	/* Lookup application incarnations and bind the right one */
 	hash = udp_app_hashkey(cp->vport);
 
-	spin_lock(&udp_app_lock);
-	list_for_each_entry(inc, &udp_apps[hash], p_list) {
+	spin_lock(&ipvs->udp_app_lock);
+	list_for_each_entry(inc, &ipvs->udp_apps[hash], p_list) {
 		if (inc->port == cp->vport) {
 			if (unlikely(!ip_vs_app_inc_get(inc)))
 				break;
-			spin_unlock(&udp_app_lock);
+			spin_unlock(&ipvs->udp_app_lock);
 
 			IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
 				      "%s:%u to app %s on port %u\n",
@@ -429,14 +430,14 @@
 			goto out;
 		}
 	}
-	spin_unlock(&udp_app_lock);
+	spin_unlock(&ipvs->udp_app_lock);
 
   out:
 	return result;
 }
 
 
-static int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
+static const int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
 	[IP_VS_UDP_S_NORMAL]		=	5*60*HZ,
 	[IP_VS_UDP_S_LAST]		=	2*HZ,
 };
@@ -446,14 +447,6 @@
 	[IP_VS_UDP_S_LAST]		=	"BUG!",
 };
 
-
-static int
-udp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
-{
-	return ip_vs_set_state_timeout(pp->timeout_table, IP_VS_UDP_S_LAST,
-				       udp_state_name_table, sname, to);
-}
-
 static const char * udp_state_name(int state)
 {
 	if (state >= IP_VS_UDP_S_LAST)
@@ -464,20 +457,30 @@
 static int
 udp_state_transition(struct ip_vs_conn *cp, int direction,
 		     const struct sk_buff *skb,
-		     struct ip_vs_protocol *pp)
+		     struct ip_vs_proto_data *pd)
 {
-	cp->timeout = pp->timeout_table[IP_VS_UDP_S_NORMAL];
+	if (unlikely(!pd)) {
+		pr_err("UDP no ns data\n");
+		return 0;
+	}
+
+	cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
 	return 1;
 }
 
-static void udp_init(struct ip_vs_protocol *pp)
+static void __udp_init(struct net *net, struct ip_vs_proto_data *pd)
 {
-	IP_VS_INIT_HASH_TABLE(udp_apps);
-	pp->timeout_table = udp_timeouts;
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	ip_vs_init_hash_table(ipvs->udp_apps, UDP_APP_TAB_SIZE);
+	spin_lock_init(&ipvs->udp_app_lock);
+	pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
+							sizeof(udp_timeouts));
 }
 
-static void udp_exit(struct ip_vs_protocol *pp)
+static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
 {
+	kfree(pd->timeout_table);
 }
 
 
@@ -486,8 +489,10 @@
 	.protocol =		IPPROTO_UDP,
 	.num_states =		IP_VS_UDP_S_LAST,
 	.dont_defrag =		0,
-	.init =			udp_init,
-	.exit =			udp_exit,
+	.init =			NULL,
+	.exit =			NULL,
+	.init_netns =		__udp_init,
+	.exit_netns =		__udp_exit,
 	.conn_schedule =	udp_conn_schedule,
 	.conn_in_get =		ip_vs_conn_in_get_proto,
 	.conn_out_get =		ip_vs_conn_out_get_proto,
@@ -501,5 +506,4 @@
 	.app_conn_bind =	udp_app_conn_bind,
 	.debug_packet =		ip_vs_tcpudp_debug_packet,
 	.timeout_change =	NULL,
-	.set_state_timeout =	udp_set_state_timeout,
 };
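
With the conversion above, the UDP timeout table is no longer a shared global: __udp_init() hands each network namespace its own copy via ip_vs_create_timeout_table(), which this hunk does not show. A plausible userspace sketch of what such a helper does, assuming it simply duplicates the const template so per-namespace tuning cannot leak between namespaces:

#include <stdlib.h>
#include <string.h>

/* Userspace stand-in for ip_vs_create_timeout_table(): duplicate the
 * read-only template so every namespace gets a writable copy. */
static int *create_timeout_table(const int *table, size_t size)
{
	int *copy = malloc(size);

	if (copy)
		memcpy(copy, table, size);
	return copy;			/* freed later, cf. __udp_exit() */
}
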
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index ab85aed..d1b7298 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -5,6 +5,18 @@
  *              high-performance and highly available server based on a
  *              cluster of servers.
  *
+ * Version 1    is capable of handling both version 0 and 1 messages.
+ *              Version 0 is the plain old format.
+ *              Note that version 0 receivers will just drop version 1 messages.
+ *              Version 1 is capable of handling IPv6, persistence data,
+ *              timeouts, and firewall marks.
+ *              In ver. 1 "ip_vs_sync_conn_options" is sent in network order.
+ *              Ver. 0 can be turned on by sysctl -w net.ipv4.vs.sync_version=0
+ *
+ * Definitions: Message:    a complete datagram
+ *              Sync_conn:  a part of a Message
+ *              Param Data: an option to a Sync_conn.
+ *
  * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
  *
  * ip_vs_sync:  sync connection info from master load balancer to backups
@@ -15,6 +27,8 @@
  *	Alexandre Cassen	:	Added SyncID support for incoming sync
  *					messages filtering.
  *	Justin Ossevoort	:	Fix endian problem on sync message size.
+ *	Hans Schillstrom	:	Added Version 1: i.e. IPv6,
+ *					Persistence support, fwmark and time-out.
  */
 
 #define KMSG_COMPONENT "IPVS"
@@ -35,6 +49,8 @@
 #include <linux/wait.h>
 #include <linux/kernel.h>
 
+#include <asm/unaligned.h>		/* Used for ntoh_seq and hton_seq */
+
 #include <net/ip.h>
 #include <net/sock.h>
 
@@ -43,11 +59,13 @@
 #define IP_VS_SYNC_GROUP 0xe0000051    /* multicast addr - 224.0.0.81 */
 #define IP_VS_SYNC_PORT  8848          /* multicast port */
 
+#define SYNC_PROTO_VER  1		/* Protocol version in header */
 
 /*
  *	IPVS sync connection entry
+ *	Version 0, i.e. original version.
  */
-struct ip_vs_sync_conn {
+struct ip_vs_sync_conn_v0 {
 	__u8			reserved;
 
 	/* Protocol, addresses and port numbers */
@@ -71,41 +89,159 @@
 	struct ip_vs_seq        out_seq;        /* outgoing seq. struct */
 };
 
-struct ip_vs_sync_thread_data {
-	struct socket *sock;
-	char *buf;
-};
-
-#define SIMPLE_CONN_SIZE  (sizeof(struct ip_vs_sync_conn))
-#define FULL_CONN_SIZE  \
-(sizeof(struct ip_vs_sync_conn) + sizeof(struct ip_vs_sync_conn_options))
-
-
 /*
-  The master mulitcasts messages to the backup load balancers in the
-  following format.
+     Sync Connection format (sync_conn)
 
        0                   1                   2                   3
        0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-      |  Count Conns  |    SyncID     |            Size               |
+      |    Type       |    Protocol   | Ver.  |        Size           |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                             Flags                             |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |            State              |         cport                 |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |            vport              |         dport                 |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                             fwmark                            |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                             timeout  (in sec.)                |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                              ...                              |
+      |                        IP-Addresses  (v4 or v6)               |
+      |                              ...                              |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Optional Parameters.
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      | Param. Type    | Param. Length |   Param. data                |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+                               |
+      |                              ...                              |
+      |                               +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                               | Param Type    | Param. Length |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                           Param  data                         |
+      |         Last Param data should be padded for 32 bit alignment |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+
+/*
+ *  Type 0, IPv4 sync connection format
+ */
+struct ip_vs_sync_v4 {
+	__u8			type;
+	__u8			protocol;	/* Which protocol (TCP/UDP) */
+	__be16			ver_size;	/* Version msb 4 bits */
+	/* Flags and state transition */
+	__be32			flags;		/* status flags */
+	__be16			state;		/* state info 	*/
+	/* Protocol, addresses and port numbers */
+	__be16			cport;
+	__be16			vport;
+	__be16			dport;
+	__be32			fwmark;		/* Firewall mark from skb */
+	__be32			timeout;	/* cp timeout */
+	__be32			caddr;		/* client address */
+	__be32			vaddr;		/* virtual address */
+	__be32			daddr;		/* destination address */
+	/* The sequence options start here */
+	/* PE data padded to 32bit alignment after seq. options */
+};
+/*
+ * Type 2 messages IPv6
+ */
+struct ip_vs_sync_v6 {
+	__u8			type;
+	__u8			protocol;	/* Which protocol (TCP/UDP) */
+	__be16			ver_size;	/* Version msb 4 bits */
+	/* Flags and state transition */
+	__be32			flags;		/* status flags */
+	__be16			state;		/* state info 	*/
+	/* Protocol, addresses and port numbers */
+	__be16			cport;
+	__be16			vport;
+	__be16			dport;
+	__be32			fwmark;		/* Firewall mark from skb */
+	__be32			timeout;	/* cp timeout */
+	struct in6_addr		caddr;		/* client address */
+	struct in6_addr		vaddr;		/* virtual address */
+	struct in6_addr		daddr;		/* destination address */
+	/* The sequence options start here */
+	/* PE data padded to 32bit alignment after seq. options */
+};
+
+union ip_vs_sync_conn {
+	struct ip_vs_sync_v4	v4;
+	struct ip_vs_sync_v6	v6;
+};
+
+/* Bits in Type field in above */
+#define STYPE_INET6		0
+#define STYPE_F_INET6		(1 << STYPE_INET6)
+
+#define SVER_SHIFT		12		/* Shift to get version */
+#define SVER_MASK		0x0fff		/* Mask to strip version */
+
+#define IPVS_OPT_SEQ_DATA	1
+#define IPVS_OPT_PE_DATA	2
+#define IPVS_OPT_PE_NAME	3
+#define IPVS_OPT_PARAM		7
+
+#define IPVS_OPT_F_SEQ_DATA	(1 << (IPVS_OPT_SEQ_DATA-1))
+#define IPVS_OPT_F_PE_DATA	(1 << (IPVS_OPT_PE_DATA-1))
+#define IPVS_OPT_F_PE_NAME	(1 << (IPVS_OPT_PE_NAME-1))
+#define IPVS_OPT_F_PARAM	(1 << (IPVS_OPT_PARAM-1))
+
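+
+  The ver_size field defined above packs the protocol version into the top
+  4 bits (SVER_SHIFT) and the entry size into the low 12 bits (SVER_MASK);
+  the sender below currently stores version 0, i.e. only the size. A
+  self-contained illustration of the packing and unpacking (the 40-byte
+  size is just an example value):

#include <stdint.h>
#include <stdio.h>

#define SVER_SHIFT	12		/* Shift to get version */
#define SVER_MASK	0x0fff		/* Mask to strip version */

static uint16_t pack_ver_size(unsigned int version, unsigned int size)
{
	return (uint16_t)((version << SVER_SHIFT) | (size & SVER_MASK));
}

int main(void)
{
	uint16_t vs = pack_ver_size(0, 40);	/* hypothetical 40-byte entry */

	printf("version=%u size=%u\n", vs >> SVER_SHIFT, vs & SVER_MASK);
	return 0;
}
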
+struct ip_vs_sync_thread_data {
+	struct net *net;
+	struct socket *sock;
+	char *buf;
+};
+
+/* Version 0 definition of packet sizes */
+#define SIMPLE_CONN_SIZE  (sizeof(struct ip_vs_sync_conn_v0))
+#define FULL_CONN_SIZE  \
+(sizeof(struct ip_vs_sync_conn_v0) + sizeof(struct ip_vs_sync_conn_options))
+
+
+/*
+  The master multicasts messages (datagrams) to the backup load balancers
+  in the following format.
+
+ Version 1:
+  Note: the first byte must be zero, so version 0 receivers will drop the packet.
+
+       0                   1                   2                   3
+       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |      0        |    SyncID     |            Size               |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |  Count Conns  |    Version    |    Reserved, set to Zero      |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       |                                                               |
       |                    IPVS Sync Connection (1)                   |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       |                            .                                  |
-      |                            .                                  |
+      ~                            .                                  ~
       |                            .                                  |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
       |                                                               |
       |                    IPVS Sync Connection (n)                   |
       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Version 0 Header
+       0                   1                   2                   3
+       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |  Count Conns  |    SyncID     |            Size               |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      |                    IPVS Sync Connection (1)                   |
 */
 
 #define SYNC_MESG_HEADER_LEN	4
 #define MAX_CONNS_PER_SYNCBUFF	255 /* nr_conns in ip_vs_sync_mesg is 8 bit */
 
-struct ip_vs_sync_mesg {
+/* Version 0 header */
+struct ip_vs_sync_mesg_v0 {
 	__u8                    nr_conns;
 	__u8                    syncid;
 	__u16                   size;
@@ -113,9 +249,16 @@
 	/* ip_vs_sync_conn entries start here */
 };
 
-/* the maximum length of sync (sending/receiving) message */
-static int sync_send_mesg_maxlen;
-static int sync_recv_mesg_maxlen;
+/* Version 1 header */
+struct ip_vs_sync_mesg {
+	__u8			reserved;	/* must be zero */
+	__u8			syncid;
+	__u16			size;
+	__u8			nr_conns;
+	__s8			version;	/* SYNC_PROTO_VER  */
+	__u16			spare;
+	/* ip_vs_sync_conn entries start here */
+};
 
 struct ip_vs_sync_buff {
 	struct list_head        list;
@@ -127,28 +270,6 @@
 	unsigned char           *end;
 };
 
-
-/* the sync_buff list head and the lock */
-static LIST_HEAD(ip_vs_sync_queue);
-static DEFINE_SPINLOCK(ip_vs_sync_lock);
-
-/* current sync_buff for accepting new conn entries */
-static struct ip_vs_sync_buff   *curr_sb = NULL;
-static DEFINE_SPINLOCK(curr_sb_lock);
-
-/* ipvs sync daemon state */
-volatile int ip_vs_sync_state = IP_VS_STATE_NONE;
-volatile int ip_vs_master_syncid = 0;
-volatile int ip_vs_backup_syncid = 0;
-
-/* multicast interface name */
-char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
-
-/* sync daemon tasks */
-static struct task_struct *sync_master_thread;
-static struct task_struct *sync_backup_thread;
-
 /* multicast addr */
 static struct sockaddr_in mcast_addr = {
 	.sin_family		= AF_INET,
@@ -156,41 +277,71 @@
 	.sin_addr.s_addr	= cpu_to_be32(IP_VS_SYNC_GROUP),
 };
 
+/*
+ * Copy of struct ip_vs_seq
+ * From unaligned network order to aligned host order
+ */
+static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho)
+{
+	ho->init_seq       = get_unaligned_be32(&no->init_seq);
+	ho->delta          = get_unaligned_be32(&no->delta);
+	ho->previous_delta = get_unaligned_be32(&no->previous_delta);
+}
 
-static inline struct ip_vs_sync_buff *sb_dequeue(void)
+/*
+ * Copy of struct ip_vs_seq
+ * From Aligned host order to unaligned network order
+ */
+static void hton_seq(struct ip_vs_seq *ho, struct ip_vs_seq *no)
+{
+	put_unaligned_be32(ho->init_seq, &no->init_seq);
+	put_unaligned_be32(ho->delta, &no->delta);
+	put_unaligned_be32(ho->previous_delta, &no->previous_delta);
+}
+
+static inline struct ip_vs_sync_buff *sb_dequeue(struct netns_ipvs *ipvs)
 {
 	struct ip_vs_sync_buff *sb;
 
-	spin_lock_bh(&ip_vs_sync_lock);
-	if (list_empty(&ip_vs_sync_queue)) {
+	spin_lock_bh(&ipvs->sync_lock);
+	if (list_empty(&ipvs->sync_queue)) {
 		sb = NULL;
 	} else {
-		sb = list_entry(ip_vs_sync_queue.next,
+		sb = list_entry(ipvs->sync_queue.next,
 				struct ip_vs_sync_buff,
 				list);
 		list_del(&sb->list);
 	}
-	spin_unlock_bh(&ip_vs_sync_lock);
+	spin_unlock_bh(&ipvs->sync_lock);
 
 	return sb;
 }
 
-static inline struct ip_vs_sync_buff * ip_vs_sync_buff_create(void)
+/*
+ * Create a new sync buffer for Version 1 proto.
+ */
+static inline struct ip_vs_sync_buff *
+ip_vs_sync_buff_create(struct netns_ipvs *ipvs)
 {
 	struct ip_vs_sync_buff *sb;
 
 	if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
 		return NULL;
 
-	if (!(sb->mesg=kmalloc(sync_send_mesg_maxlen, GFP_ATOMIC))) {
+	sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+	if (!sb->mesg) {
 		kfree(sb);
 		return NULL;
 	}
+	sb->mesg->reserved = 0;  /* old nr_conns, i.e. must be zero now */
+	sb->mesg->version = SYNC_PROTO_VER;
+	sb->mesg->syncid = ipvs->master_syncid;
+	sb->mesg->size = sizeof(struct ip_vs_sync_mesg);
 	sb->mesg->nr_conns = 0;
-	sb->mesg->syncid = ip_vs_master_syncid;
-	sb->mesg->size = 4;
-	sb->head = (unsigned char *)sb->mesg + 4;
-	sb->end = (unsigned char *)sb->mesg + sync_send_mesg_maxlen;
+	sb->mesg->spare = 0;
+	sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg);
+	sb->end = (unsigned char *)sb->mesg + ipvs->send_mesg_maxlen;
+
 	sb->firstuse = jiffies;
 	return sb;
 }
@@ -201,14 +352,16 @@
 	kfree(sb);
 }
 
-static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
+static inline void sb_queue_tail(struct netns_ipvs *ipvs)
 {
-	spin_lock(&ip_vs_sync_lock);
-	if (ip_vs_sync_state & IP_VS_STATE_MASTER)
-		list_add_tail(&sb->list, &ip_vs_sync_queue);
+	struct ip_vs_sync_buff *sb = ipvs->sync_buff;
+
+	spin_lock(&ipvs->sync_lock);
+	if (ipvs->sync_state & IP_VS_STATE_MASTER)
+		list_add_tail(&sb->list, &ipvs->sync_queue);
 	else
 		ip_vs_sync_buff_release(sb);
-	spin_unlock(&ip_vs_sync_lock);
+	spin_unlock(&ipvs->sync_lock);
 }
 
 /*
@@ -216,36 +369,101 @@
  *	than the specified time or the specified time is zero.
  */
 static inline struct ip_vs_sync_buff *
-get_curr_sync_buff(unsigned long time)
+get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time)
 {
 	struct ip_vs_sync_buff *sb;
 
-	spin_lock_bh(&curr_sb_lock);
-	if (curr_sb && (time == 0 ||
-			time_before(jiffies - curr_sb->firstuse, time))) {
-		sb = curr_sb;
-		curr_sb = NULL;
+	spin_lock_bh(&ipvs->sync_buff_lock);
+	if (ipvs->sync_buff && (time == 0 ||
+	    time_before(jiffies - ipvs->sync_buff->firstuse, time))) {
+		sb = ipvs->sync_buff;
+		ipvs->sync_buff = NULL;
 	} else
 		sb = NULL;
-	spin_unlock_bh(&curr_sb_lock);
+	spin_unlock_bh(&ipvs->sync_buff_lock);
 	return sb;
 }
 
+/*
+ * Switch mode from sending version 0 or 1
+ *  - must handle sync_buf
+ */
+void ip_vs_sync_switch_mode(struct net *net, int mode)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	if (!(ipvs->sync_state & IP_VS_STATE_MASTER))
+		return;
+	if (mode == ipvs->sysctl_sync_ver || !ipvs->sync_buff)
+		return;
+
+	spin_lock_bh(&ipvs->sync_buff_lock);
+	/* Buffer empty? Then let buf_create do the job */
+	if (ipvs->sync_buff->mesg->size <=  sizeof(struct ip_vs_sync_mesg)) {
+		kfree(ipvs->sync_buff);
+		ipvs->sync_buff = NULL;
+	} else {
+		spin_lock_bh(&ipvs->sync_lock);
+		if (ipvs->sync_state & IP_VS_STATE_MASTER)
+			list_add_tail(&ipvs->sync_buff->list,
+				      &ipvs->sync_queue);
+		else
+			ip_vs_sync_buff_release(ipvs->sync_buff);
+		spin_unlock_bh(&ipvs->sync_lock);
+	}
+	spin_unlock_bh(&ipvs->sync_buff_lock);
+}
 
 /*
- *      Add an ip_vs_conn information into the current sync_buff.
- *      Called by ip_vs_in.
+ * Create a new sync buffer for Version 0 proto.
  */
-void ip_vs_sync_conn(struct ip_vs_conn *cp)
+static inline struct ip_vs_sync_buff *
+ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs)
 {
-	struct ip_vs_sync_mesg *m;
-	struct ip_vs_sync_conn *s;
+	struct ip_vs_sync_buff *sb;
+	struct ip_vs_sync_mesg_v0 *mesg;
+
+	if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
+		return NULL;
+
+	sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC);
+	if (!sb->mesg) {
+		kfree(sb);
+		return NULL;
+	}
+	mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg;
+	mesg->nr_conns = 0;
+	mesg->syncid = ipvs->master_syncid;
+	mesg->size = sizeof(struct ip_vs_sync_mesg_v0);
+	sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0);
+	sb->end = (unsigned char *)mesg + ipvs->send_mesg_maxlen;
+	sb->firstuse = jiffies;
+	return sb;
+}
+
+/*
+ *      Version 0, can be switched on via sysctl.
+ *      Add an ip_vs_conn information into the current sync_buff.
+ */
+void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct ip_vs_sync_mesg_v0 *m;
+	struct ip_vs_sync_conn_v0 *s;
 	int len;
 
-	spin_lock(&curr_sb_lock);
-	if (!curr_sb) {
-		if (!(curr_sb=ip_vs_sync_buff_create())) {
-			spin_unlock(&curr_sb_lock);
+	if (unlikely(cp->af != AF_INET))
+		return;
+	/* Do not sync ONE PACKET */
+	if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+		return;
+
+	spin_lock(&ipvs->sync_buff_lock);
+	if (!ipvs->sync_buff) {
+		ipvs->sync_buff =
+			ip_vs_sync_buff_create_v0(ipvs);
+		if (!ipvs->sync_buff) {
+			spin_unlock(&ipvs->sync_buff_lock);
 			pr_err("ip_vs_sync_buff_create failed.\n");
 			return;
 		}
@@ -253,10 +471,11 @@
 
 	len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
 		SIMPLE_CONN_SIZE;
-	m = curr_sb->mesg;
-	s = (struct ip_vs_sync_conn *)curr_sb->head;
+	m = (struct ip_vs_sync_mesg_v0 *)ipvs->sync_buff->mesg;
+	s = (struct ip_vs_sync_conn_v0 *)ipvs->sync_buff->head;
 
 	/* copy members */
+	s->reserved = 0;
 	s->protocol = cp->protocol;
 	s->cport = cp->cport;
 	s->vport = cp->vport;
@@ -274,83 +493,366 @@
 
 	m->nr_conns++;
 	m->size += len;
-	curr_sb->head += len;
+	ipvs->sync_buff->head += len;
 
 	/* check if there is a space for next one */
-	if (curr_sb->head+FULL_CONN_SIZE > curr_sb->end) {
-		sb_queue_tail(curr_sb);
-		curr_sb = NULL;
+	if (ipvs->sync_buff->head + FULL_CONN_SIZE > ipvs->sync_buff->end) {
+		sb_queue_tail(ipvs);
+		ipvs->sync_buff = NULL;
 	}
-	spin_unlock(&curr_sb_lock);
+	spin_unlock(&ipvs->sync_buff_lock);
 
 	/* synchronize its controller if it has */
 	if (cp->control)
-		ip_vs_sync_conn(cp->control);
+		ip_vs_sync_conn(net, cp->control);
 }
 
-static inline int
-ip_vs_conn_fill_param_sync(int af, int protocol,
-			   const union nf_inet_addr *caddr, __be16 cport,
-			   const union nf_inet_addr *vaddr, __be16 vport,
-			   struct ip_vs_conn_param *p)
+/*
+ *      Add an ip_vs_conn information into the current sync_buff.
+ *      Called by ip_vs_in.
+ *      Sending Version 1 messages
+ */
+void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp)
 {
-	/* XXX: Need to take into account persistence engine */
-	ip_vs_conn_fill_param(af, protocol, caddr, cport, vaddr, vport, p);
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct ip_vs_sync_mesg *m;
+	union ip_vs_sync_conn *s;
+	__u8 *p;
+	unsigned int len, pe_name_len, pad;
+
+	/* Handle old version of the protocol */
+	if (ipvs->sysctl_sync_ver == 0) {
+		ip_vs_sync_conn_v0(net, cp);
+		return;
+	}
+	/* Do not sync ONE PACKET */
+	if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
+		goto control;
+sloop:
+	/* Sanity checks */
+	pe_name_len = 0;
+	if (cp->pe_data_len) {
+		if (!cp->pe_data || !cp->dest) {
+			IP_VS_ERR_RL("SYNC, connection pe_data invalid\n");
+			return;
+		}
+		pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN);
+	}
+
+	spin_lock(&ipvs->sync_buff_lock);
+
+#ifdef CONFIG_IP_VS_IPV6
+	if (cp->af == AF_INET6)
+		len = sizeof(struct ip_vs_sync_v6);
+	else
+#endif
+		len = sizeof(struct ip_vs_sync_v4);
+
+	if (cp->flags & IP_VS_CONN_F_SEQ_MASK)
+		len += sizeof(struct ip_vs_sync_conn_options) + 2;
+
+	if (cp->pe_data_len)
+		len += cp->pe_data_len + 2;	/* + Param hdr field */
+	if (pe_name_len)
+		len += pe_name_len + 2;
+
+	/* check if there is a space for this one  */
+	pad = 0;
+	if (ipvs->sync_buff) {
+		pad = (4 - (size_t)ipvs->sync_buff->head) & 3;
+		if (ipvs->sync_buff->head + len + pad > ipvs->sync_buff->end) {
+			sb_queue_tail(ipvs);
+			ipvs->sync_buff = NULL;
+			pad = 0;
+		}
+	}
+
+	if (!ipvs->sync_buff) {
+		ipvs->sync_buff = ip_vs_sync_buff_create(ipvs);
+		if (!ipvs->sync_buff) {
+			spin_unlock(&ipvs->sync_buff_lock);
+			pr_err("ip_vs_sync_buff_create failed.\n");
+			return;
+		}
+	}
+
+	m = ipvs->sync_buff->mesg;
+	p = ipvs->sync_buff->head;
+	ipvs->sync_buff->head += pad + len;
+	m->size += pad + len;
+	/* Add any padding needed after the previous sync_conn */
+	while (pad--)
+		*(p++) = 0;
+
+	s = (union ip_vs_sync_conn *)p;
+
+	/* Set message type  & copy members */
+	s->v4.type = (cp->af == AF_INET6 ? STYPE_F_INET6 : 0);
+	s->v4.ver_size = htons(len & SVER_MASK);	/* Version 0 */
+	s->v4.flags = htonl(cp->flags & ~IP_VS_CONN_F_HASHED);
+	s->v4.state = htons(cp->state);
+	s->v4.protocol = cp->protocol;
+	s->v4.cport = cp->cport;
+	s->v4.vport = cp->vport;
+	s->v4.dport = cp->dport;
+	s->v4.fwmark = htonl(cp->fwmark);
+	s->v4.timeout = htonl(cp->timeout / HZ);
+	m->nr_conns++;
+
+#ifdef CONFIG_IP_VS_IPV6
+	if (cp->af == AF_INET6) {
+		p += sizeof(struct ip_vs_sync_v6);
+		ipv6_addr_copy(&s->v6.caddr, &cp->caddr.in6);
+		ipv6_addr_copy(&s->v6.vaddr, &cp->vaddr.in6);
+		ipv6_addr_copy(&s->v6.daddr, &cp->daddr.in6);
+	} else
+#endif
+	{
+		p += sizeof(struct ip_vs_sync_v4);	/* options ptr */
+		s->v4.caddr = cp->caddr.ip;
+		s->v4.vaddr = cp->vaddr.ip;
+		s->v4.daddr = cp->daddr.ip;
+	}
+	if (cp->flags & IP_VS_CONN_F_SEQ_MASK) {
+		*(p++) = IPVS_OPT_SEQ_DATA;
+		*(p++) = sizeof(struct ip_vs_sync_conn_options);
+		hton_seq((struct ip_vs_seq *)p, &cp->in_seq);
+		p += sizeof(struct ip_vs_seq);
+		hton_seq((struct ip_vs_seq *)p, &cp->out_seq);
+		p += sizeof(struct ip_vs_seq);
+	}
+	/* Handle pe data */
+	if (cp->pe_data_len && cp->pe_data) {
+		*(p++) = IPVS_OPT_PE_DATA;
+		*(p++) = cp->pe_data_len;
+		memcpy(p, cp->pe_data, cp->pe_data_len);
+		p += cp->pe_data_len;
+		if (pe_name_len) {
+			/* Add PE_NAME */
+			*(p++) = IPVS_OPT_PE_NAME;
+			*(p++) = pe_name_len;
+			memcpy(p, cp->pe->name, pe_name_len);
+			p += pe_name_len;
+		}
+	}
+
+	spin_unlock(&ipvs->sync_buff_lock);
+
+control:
+	/* synchronize its controller if it has */
+	cp = cp->control;
+	if (!cp)
+		return;
+	/*
+	 * Reduce sync rate for templates
+	 * i.e only increment in_pkts for Templates.
+	 */
+	if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
+		int pkts = atomic_add_return(1, &cp->in_pkts);
+
+		if (pkts % ipvs->sysctl_sync_threshold[1] != 1)
+			return;
+	}
+	goto sloop;
+}
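
ip_vs_sync_conn() keeps each entry 32-bit aligned: the sender computes the filler bytes with (4 - head) & 3 before writing the next sync_conn, and the receiver later rounds each entry length up with (size + 3) & ~3. A tiny standalone check of both expressions, with a made-up buffer position and entry size:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t head = 0x1002;			/* example write position */
	unsigned int size = 45;			/* example sync_conn length */

	unsigned int pad = (4 - head) & 3;	/* filler before next entry: 2 */
	unsigned int aligned = (size + 3) & ~3u;	/* rounded-up length: 48 */

	printf("pad=%u aligned=%u\n", pad, aligned);
	return 0;
}
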
+
+/*
+ *  fill_param used by version 1
+ */
+static inline int
+ip_vs_conn_fill_param_sync(struct net *net, int af, union ip_vs_sync_conn *sc,
+			   struct ip_vs_conn_param *p,
+			   __u8 *pe_data, unsigned int pe_data_len,
+			   __u8 *pe_name, unsigned int pe_name_len)
+{
+#ifdef CONFIG_IP_VS_IPV6
+	if (af == AF_INET6)
+		ip_vs_conn_fill_param(net, af, sc->v6.protocol,
+				      (const union nf_inet_addr *)&sc->v6.caddr,
+				      sc->v6.cport,
+				      (const union nf_inet_addr *)&sc->v6.vaddr,
+				      sc->v6.vport, p);
+	else
+#endif
+		ip_vs_conn_fill_param(net, af, sc->v4.protocol,
+				      (const union nf_inet_addr *)&sc->v4.caddr,
+				      sc->v4.cport,
+				      (const union nf_inet_addr *)&sc->v4.vaddr,
+				      sc->v4.vport, p);
+	/* Handle pe data */
+	if (pe_data_len) {
+		if (pe_name_len) {
+			char buff[IP_VS_PENAME_MAXLEN+1];
+
+			memcpy(buff, pe_name, pe_name_len);
+			buff[pe_name_len]=0;
+			p->pe = __ip_vs_pe_getbyname(buff);
+			if (!p->pe) {
+				IP_VS_DBG(3, "BACKUP, no %s engine found/loaded\n",
+					     buff);
+				return 1;
+			}
+		} else {
+			IP_VS_ERR_RL("BACKUP, Invalid PE parameters\n");
+			return 1;
+		}
+
+		p->pe_data = kmalloc(pe_data_len, GFP_ATOMIC);
+		if (!p->pe_data) {
+			if (p->pe->module)
+				module_put(p->pe->module);
+			return -ENOMEM;
+		}
+		memcpy(p->pe_data, pe_data, pe_data_len);
+		p->pe_data_len = pe_data_len;
+	}
 	return 0;
 }
 
 /*
- *      Process received multicast message and create the corresponding
- *      ip_vs_conn entries.
+ *  Connection Add / Update.
+ *  Common for version 0 and 1 reception of backup sync_conns.
+ *  Param: ...
+ *         timeout is in sec.
  */
-static void ip_vs_process_message(const char *buffer, const size_t buflen)
+static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
+			    unsigned int flags, unsigned int state,
+			    unsigned int protocol, unsigned int type,
+			    const union nf_inet_addr *daddr, __be16 dport,
+			    unsigned long timeout, __u32 fwmark,
+			    struct ip_vs_sync_conn_options *opt)
 {
-	struct ip_vs_sync_mesg *m = (struct ip_vs_sync_mesg *)buffer;
-	struct ip_vs_sync_conn *s;
-	struct ip_vs_sync_conn_options *opt;
-	struct ip_vs_conn *cp;
-	struct ip_vs_protocol *pp;
 	struct ip_vs_dest *dest;
+	struct ip_vs_conn *cp;
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	if (!(flags & IP_VS_CONN_F_TEMPLATE))
+		cp = ip_vs_conn_in_get(param);
+	else
+		cp = ip_vs_ct_in_get(param);
+
+	if (cp && param->pe_data) 	/* Free pe_data */
+		kfree(param->pe_data);
+	if (!cp) {
+		/*
+		 * Find the appropriate destination for the connection.
+		 * If it is not found the connection will remain unbound
+		 * but still handled.
+		 */
+		dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
+				       param->vport, protocol, fwmark);
+
+		/*  Set the appropriate activity flag */
+		if (protocol == IPPROTO_TCP) {
+			if (state != IP_VS_TCP_S_ESTABLISHED)
+				flags |= IP_VS_CONN_F_INACTIVE;
+			else
+				flags &= ~IP_VS_CONN_F_INACTIVE;
+		} else if (protocol == IPPROTO_SCTP) {
+			if (state != IP_VS_SCTP_S_ESTABLISHED)
+				flags |= IP_VS_CONN_F_INACTIVE;
+			else
+				flags &= ~IP_VS_CONN_F_INACTIVE;
+		}
+		cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark);
+		if (dest)
+			atomic_dec(&dest->refcnt);
+		if (!cp) {
+			if (param->pe_data)
+				kfree(param->pe_data);
+			IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
+			return;
+		}
+	} else if (!cp->dest) {
+		dest = ip_vs_try_bind_dest(cp);
+		if (dest)
+			atomic_dec(&dest->refcnt);
+	} else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
+		(cp->state != state)) {
+		/* update active/inactive flag for the connection */
+		dest = cp->dest;
+		if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
+			(state != IP_VS_TCP_S_ESTABLISHED)) {
+			atomic_dec(&dest->activeconns);
+			atomic_inc(&dest->inactconns);
+			cp->flags |= IP_VS_CONN_F_INACTIVE;
+		} else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
+			(state == IP_VS_TCP_S_ESTABLISHED)) {
+			atomic_inc(&dest->activeconns);
+			atomic_dec(&dest->inactconns);
+			cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+		}
+	} else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
+		(cp->state != state)) {
+		dest = cp->dest;
+		if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
+		(state != IP_VS_SCTP_S_ESTABLISHED)) {
+			atomic_dec(&dest->activeconns);
+			atomic_inc(&dest->inactconns);
+			cp->flags &= ~IP_VS_CONN_F_INACTIVE;
+		}
+	}
+
+	if (opt)
+		memcpy(&cp->in_seq, opt, sizeof(*opt));
+	atomic_set(&cp->in_pkts, ipvs->sysctl_sync_threshold[0]);
+	cp->state = state;
+	cp->old_state = cp->state;
+	/*
+	 * For Ver 0 messages style
+	 *  - Not possible to recover the right timeout for templates
+	 *  - can not find the right fwmark
+	 *    virtual service. If needed, we can do it for
+	 *    non-fwmark persistent services.
+	 * Ver 1 messages style.
+	 *  - No problem.
+	 */
+	if (timeout) {
+		if (timeout > MAX_SCHEDULE_TIMEOUT / HZ)
+			timeout = MAX_SCHEDULE_TIMEOUT / HZ;
+		cp->timeout = timeout*HZ;
+	} else {
+		struct ip_vs_proto_data *pd;
+
+		pd = ip_vs_proto_data_get(net, protocol);
+		if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table)
+			cp->timeout = pd->timeout_table[state];
+		else
+			cp->timeout = (3*60*HZ);
+	}
+	ip_vs_conn_put(cp);
+}
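
ip_vs_proc_conn() gets the timeout in seconds (version 1 senders put cp->timeout / HZ on the wire), clamps it to MAX_SCHEDULE_TIMEOUT / HZ and converts it back to jiffies; a zero timeout (as version 0 messages send) falls back to the protocol's timeout table or a 3-minute default. A small model of that conversion, with HZ and the clamp taken as assumed example values and the table lookup left out:

#include <stdio.h>

#define HZ_EXAMPLE		100		/* assumed kernel HZ */
#define MAX_TIMEOUT_SECS	(30 * 60)	/* assumed clamp, in seconds */

static unsigned long timeout_to_jiffies(unsigned long seconds)
{
	if (!seconds)				/* v0 case: use a default */
		return 3 * 60 * HZ_EXAMPLE;	/* real code tries the proto table first */
	if (seconds > MAX_TIMEOUT_SECS)
		seconds = MAX_TIMEOUT_SECS;
	return seconds * HZ_EXAMPLE;
}

int main(void)
{
	printf("%lu %lu\n", timeout_to_jiffies(0), timeout_to_jiffies(120));
	return 0;
}
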
+
+/*
+ *  Process received multicast message for Version 0
+ */
+static void ip_vs_process_message_v0(struct net *net, const char *buffer,
+				     const size_t buflen)
+{
+	struct ip_vs_sync_mesg_v0 *m = (struct ip_vs_sync_mesg_v0 *)buffer;
+	struct ip_vs_sync_conn_v0 *s;
+	struct ip_vs_sync_conn_options *opt;
+	struct ip_vs_protocol *pp;
 	struct ip_vs_conn_param param;
 	char *p;
 	int i;
 
-	if (buflen < sizeof(struct ip_vs_sync_mesg)) {
-		IP_VS_ERR_RL("sync message header too short\n");
-		return;
-	}
-
-	/* Convert size back to host byte order */
-	m->size = ntohs(m->size);
-
-	if (buflen != m->size) {
-		IP_VS_ERR_RL("bogus sync message size\n");
-		return;
-	}
-
-	/* SyncID sanity check */
-	if (ip_vs_backup_syncid != 0 && m->syncid != ip_vs_backup_syncid) {
-		IP_VS_DBG(7, "Ignoring incoming msg with syncid = %d\n",
-			  m->syncid);
-		return;
-	}
-
-	p = (char *)buffer + sizeof(struct ip_vs_sync_mesg);
+	p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0);
 	for (i=0; i<m->nr_conns; i++) {
 		unsigned flags, state;
 
 		if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
-			IP_VS_ERR_RL("bogus conn in sync message\n");
+			IP_VS_ERR_RL("BACKUP v0, bogus conn\n");
 			return;
 		}
-		s = (struct ip_vs_sync_conn *) p;
+		s = (struct ip_vs_sync_conn_v0 *) p;
 		flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC;
 		flags &= ~IP_VS_CONN_F_HASHED;
 		if (flags & IP_VS_CONN_F_SEQ_MASK) {
 			opt = (struct ip_vs_sync_conn_options *)&s[1];
 			p += FULL_CONN_SIZE;
 			if (p > buffer+buflen) {
-				IP_VS_ERR_RL("bogus conn options in sync message\n");
+				IP_VS_ERR_RL("BACKUP v0, Dropping buffer, bogus conn options\n");
 				return;
 			}
 		} else {
@@ -362,118 +864,286 @@
 		if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
 			pp = ip_vs_proto_get(s->protocol);
 			if (!pp) {
-				IP_VS_ERR_RL("Unsupported protocol %u in sync msg\n",
+				IP_VS_DBG(2, "BACKUP v0, Unsupported protocol %u\n",
 					s->protocol);
 				continue;
 			}
 			if (state >= pp->num_states) {
-				IP_VS_DBG(2, "Invalid %s state %u in sync msg\n",
+				IP_VS_DBG(2, "BACKUP v0, Invalid %s state %u\n",
 					pp->name, state);
 				continue;
 			}
 		} else {
 			/* protocol in templates is not used for state/timeout */
-			pp = NULL;
 			if (state > 0) {
-				IP_VS_DBG(2, "Invalid template state %u in sync msg\n",
+				IP_VS_DBG(2, "BACKUP v0, Invalid template state %u\n",
 					state);
 				state = 0;
 			}
 		}
 
-		{
-			if (ip_vs_conn_fill_param_sync(AF_INET, s->protocol,
-					      (union nf_inet_addr *)&s->caddr,
-					      s->cport,
-					      (union nf_inet_addr *)&s->vaddr,
-					      s->vport, &param)) {
-				pr_err("ip_vs_conn_fill_param_sync failed");
-				return;
-			}
-			if (!(flags & IP_VS_CONN_F_TEMPLATE))
-				cp = ip_vs_conn_in_get(&param);
-			else
-				cp = ip_vs_ct_in_get(&param);
-		}
-		if (!cp) {
-			/*
-			 * Find the appropriate destination for the connection.
-			 * If it is not found the connection will remain unbound
-			 * but still handled.
-			 */
-			dest = ip_vs_find_dest(AF_INET,
-					       (union nf_inet_addr *)&s->daddr,
-					       s->dport,
-					       (union nf_inet_addr *)&s->vaddr,
-					       s->vport,
-					       s->protocol);
-			/*  Set the approprite ativity flag */
-			if (s->protocol == IPPROTO_TCP) {
-				if (state != IP_VS_TCP_S_ESTABLISHED)
-					flags |= IP_VS_CONN_F_INACTIVE;
-				else
-					flags &= ~IP_VS_CONN_F_INACTIVE;
-			} else if (s->protocol == IPPROTO_SCTP) {
-				if (state != IP_VS_SCTP_S_ESTABLISHED)
-					flags |= IP_VS_CONN_F_INACTIVE;
-				else
-					flags &= ~IP_VS_CONN_F_INACTIVE;
-			}
-			cp = ip_vs_conn_new(&param,
-					    (union nf_inet_addr *)&s->daddr,
-					    s->dport, flags, dest);
-			if (dest)
-				atomic_dec(&dest->refcnt);
-			if (!cp) {
-				pr_err("ip_vs_conn_new failed\n");
-				return;
-			}
-		} else if (!cp->dest) {
-			dest = ip_vs_try_bind_dest(cp);
-			if (dest)
-				atomic_dec(&dest->refcnt);
-		} else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
-			   (cp->state != state)) {
-			/* update active/inactive flag for the connection */
-			dest = cp->dest;
-			if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
-				(state != IP_VS_TCP_S_ESTABLISHED)) {
-				atomic_dec(&dest->activeconns);
-				atomic_inc(&dest->inactconns);
-				cp->flags |= IP_VS_CONN_F_INACTIVE;
-			} else if ((cp->flags & IP_VS_CONN_F_INACTIVE) &&
-				(state == IP_VS_TCP_S_ESTABLISHED)) {
-				atomic_inc(&dest->activeconns);
-				atomic_dec(&dest->inactconns);
-				cp->flags &= ~IP_VS_CONN_F_INACTIVE;
-			}
-		} else if ((cp->dest) && (cp->protocol == IPPROTO_SCTP) &&
-			   (cp->state != state)) {
-			dest = cp->dest;
-			if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
-			     (state != IP_VS_SCTP_S_ESTABLISHED)) {
-			    atomic_dec(&dest->activeconns);
-			    atomic_inc(&dest->inactconns);
-			    cp->flags &= ~IP_VS_CONN_F_INACTIVE;
-			}
-		}
+		ip_vs_conn_fill_param(net, AF_INET, s->protocol,
+				      (const union nf_inet_addr *)&s->caddr,
+				      s->cport,
+				      (const union nf_inet_addr *)&s->vaddr,
+				      s->vport, &param);
 
-		if (opt)
-			memcpy(&cp->in_seq, opt, sizeof(*opt));
-		atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
-		cp->state = state;
-		cp->old_state = cp->state;
-		/*
-		 * We can not recover the right timeout for templates
-		 * in all cases, we can not find the right fwmark
-		 * virtual service. If needed, we can do it for
-		 * non-fwmark persistent services.
-		 */
-		if (!(flags & IP_VS_CONN_F_TEMPLATE) && pp->timeout_table)
-			cp->timeout = pp->timeout_table[state];
-		else
-			cp->timeout = (3*60*HZ);
-		ip_vs_conn_put(cp);
+		/* Send timeout as Zero */
+		ip_vs_proc_conn(net, &param, flags, state, s->protocol, AF_INET,
+				(union nf_inet_addr *)&s->daddr, s->dport,
+				0, 0, opt);
+	}
+}
+
+/*
+ * Handle options
+ */
+static inline int ip_vs_proc_seqopt(__u8 *p, unsigned int plen,
+				    __u32 *opt_flags,
+				    struct ip_vs_sync_conn_options *opt)
+{
+	struct ip_vs_sync_conn_options *topt;
+
+	topt = (struct ip_vs_sync_conn_options *)p;
+
+	if (plen != sizeof(struct ip_vs_sync_conn_options)) {
+		IP_VS_DBG(2, "BACKUP, bogus conn options length\n");
+		return -EINVAL;
+	}
+	if (*opt_flags & IPVS_OPT_F_SEQ_DATA) {
+		IP_VS_DBG(2, "BACKUP, conn options found twice\n");
+		return -EINVAL;
+	}
+	ntoh_seq(&topt->in_seq, &opt->in_seq);
+	ntoh_seq(&topt->out_seq, &opt->out_seq);
+	*opt_flags |= IPVS_OPT_F_SEQ_DATA;
+	return 0;
+}
+
+static int ip_vs_proc_str(__u8 *p, unsigned int plen, unsigned int *data_len,
+			  __u8 **data, unsigned int maxlen,
+			  __u32 *opt_flags, __u32 flag)
+{
+	if (plen > maxlen) {
+		IP_VS_DBG(2, "BACKUP, bogus par.data len > %d\n", maxlen);
+		return -EINVAL;
+	}
+	if (*opt_flags & flag) {
+		IP_VS_DBG(2, "BACKUP, Par.data found twice 0x%x\n", flag);
+		return -EINVAL;
+	}
+	*data_len = plen;
+	*data = p;
+	*opt_flags |= flag;
+	return 0;
+}
+/*
+ *   Process a Version 1 sync. connection
+ */
+static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end)
+{
+	struct ip_vs_sync_conn_options opt;
+	union  ip_vs_sync_conn *s;
+	struct ip_vs_protocol *pp;
+	struct ip_vs_conn_param param;
+	__u32 flags;
+	unsigned int af, state, pe_data_len=0, pe_name_len=0;
+	__u8 *pe_data=NULL, *pe_name=NULL;
+	__u32 opt_flags=0;
+	int retc=0;
+
+	s = (union ip_vs_sync_conn *) p;
+
+	if (s->v6.type & STYPE_F_INET6) {
+#ifdef CONFIG_IP_VS_IPV6
+		af = AF_INET6;
+		p += sizeof(struct ip_vs_sync_v6);
+#else
+		IP_VS_DBG(3,"BACKUP, IPv6 msg received, and IPVS is not compiled for IPv6\n");
+		retc = 10;
+		goto out;
+#endif
+	} else if (!s->v4.type) {
+		af = AF_INET;
+		p += sizeof(struct ip_vs_sync_v4);
+	} else {
+		return -10;
+	}
+	if (p > msg_end)
+		return -20;
+
+	/* Process optional params check Type & Len. */
+	while (p < msg_end) {
+		int ptype;
+		int plen;
+
+		if (p+2 > msg_end)
+			return -30;
+		ptype = *(p++);
+		plen  = *(p++);
+
+		if (!plen || ((p + plen) > msg_end))
+			return -40;
+		/* Handle seq option  p = param data */
+		switch (ptype & ~IPVS_OPT_F_PARAM) {
+		case IPVS_OPT_SEQ_DATA:
+			if (ip_vs_proc_seqopt(p, plen, &opt_flags, &opt))
+				return -50;
+			break;
+
+		case IPVS_OPT_PE_DATA:
+			if (ip_vs_proc_str(p, plen, &pe_data_len, &pe_data,
+					   IP_VS_PEDATA_MAXLEN, &opt_flags,
+					   IPVS_OPT_F_PE_DATA))
+				return -60;
+			break;
+
+		case IPVS_OPT_PE_NAME:
+			if (ip_vs_proc_str(p, plen,&pe_name_len, &pe_name,
+					   IP_VS_PENAME_MAXLEN, &opt_flags,
+					   IPVS_OPT_F_PE_NAME))
+				return -70;
+			break;
+
+		default:
+			/* Param data mandatory ? */
+			if (!(ptype & IPVS_OPT_F_PARAM)) {
+				IP_VS_DBG(3, "BACKUP, Unknown mandatory param %d found\n",
+					  ptype & ~IPVS_OPT_F_PARAM);
+				retc = 20;
+				goto out;
+			}
+		}
+		p += plen;  /* Next option */
+	}
+
+	/* Get flags and Mask off unsupported */
+	flags  = ntohl(s->v4.flags) & IP_VS_CONN_F_BACKUP_MASK;
+	flags |= IP_VS_CONN_F_SYNC;
+	state = ntohs(s->v4.state);
+
+	if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
+		pp = ip_vs_proto_get(s->v4.protocol);
+		if (!pp) {
+			IP_VS_DBG(3,"BACKUP, Unsupported protocol %u\n",
+				s->v4.protocol);
+			retc = 30;
+			goto out;
+		}
+		if (state >= pp->num_states) {
+			IP_VS_DBG(3, "BACKUP, Invalid %s state %u\n",
+				pp->name, state);
+			retc = 40;
+			goto out;
+		}
+	} else {
+		/* protocol in templates is not used for state/timeout */
+		if (state > 0) {
+			IP_VS_DBG(3, "BACKUP, Invalid template state %u\n",
+				state);
+			state = 0;
+		}
+	}
+	if (ip_vs_conn_fill_param_sync(net, af, s, &param, pe_data,
+				       pe_data_len, pe_name, pe_name_len)) {
+		retc = 50;
+		goto out;
+	}
+	/* If IPv4 only, just silently skip IPv6 */
+	if (af == AF_INET)
+		ip_vs_proc_conn(net, &param, flags, state, s->v4.protocol, af,
+				(union nf_inet_addr *)&s->v4.daddr, s->v4.dport,
+				ntohl(s->v4.timeout), ntohl(s->v4.fwmark),
+				(opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
+				);
+#ifdef CONFIG_IP_VS_IPV6
+	else
+		ip_vs_proc_conn(net, &param, flags, state, s->v6.protocol, af,
+				(union nf_inet_addr *)&s->v6.daddr, s->v6.dport,
+				ntohl(s->v6.timeout), ntohl(s->v6.fwmark),
+				(opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL)
+				);
+#endif
+	return 0;
+	/* Error exit */
+out:
+	IP_VS_DBG(2, "BACKUP, Single msg dropped err:%d\n", retc);
+	return retc;
+
+}
+/*
+ *      Process received multicast message and create the corresponding
+ *      ip_vs_conn entries.
+ *      Handles Version 0 & 1
+ */
+static void ip_vs_process_message(struct net *net, __u8 *buffer,
+				  const size_t buflen)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+	struct ip_vs_sync_mesg *m2 = (struct ip_vs_sync_mesg *)buffer;
+	__u8 *p, *msg_end;
+	int i, nr_conns;
+
+	if (buflen < sizeof(struct ip_vs_sync_mesg_v0)) {
+		IP_VS_DBG(2, "BACKUP, message header too short\n");
+		return;
+	}
+	/* Convert size back to host byte order */
+	m2->size = ntohs(m2->size);
+
+	if (buflen != m2->size) {
+		IP_VS_DBG(2, "BACKUP, bogus message size\n");
+		return;
+	}
+	/* SyncID sanity check */
+	if (ipvs->backup_syncid != 0 && m2->syncid != ipvs->backup_syncid) {
+		IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid);
+		return;
+	}
+	/* Handle version 1  message */
+	if ((m2->version == SYNC_PROTO_VER) && (m2->reserved == 0)
+	    && (m2->spare == 0)) {
+
+		msg_end = buffer + sizeof(struct ip_vs_sync_mesg);
+		nr_conns = m2->nr_conns;
+
+		for (i=0; i<nr_conns; i++) {
+			union ip_vs_sync_conn *s;
+			unsigned size;
+			int retc;
+
+			p = msg_end;
+			if (p + sizeof(s->v4) > buffer+buflen) {
+				IP_VS_ERR_RL("BACKUP, Dropping buffer, too small\n");
+				return;
+			}
+			s = (union ip_vs_sync_conn *)p;
+			size = ntohs(s->v4.ver_size) & SVER_MASK;
+			msg_end = p + size;
+			/* Basic sanity checks */
+			if (msg_end  > buffer+buflen) {
+				IP_VS_ERR_RL("BACKUP, Dropping buffer, msg > buffer\n");
+				return;
+			}
+			if (ntohs(s->v4.ver_size) >> SVER_SHIFT) {
+				IP_VS_ERR_RL("BACKUP, Dropping buffer, Unknown version %d\n",
+					      ntohs(s->v4.ver_size) >> SVER_SHIFT);
+				return;
+			}
+			/* Process a single sync_conn */
+			retc = ip_vs_proc_sync_conn(net, p, msg_end);
+			if (retc < 0) {
+				IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n",
+					     retc);
+				return;
+			}
+			/* Make sure we have 32 bit alignment */
+			msg_end = p + ((size + 3) & ~3);
+		}
+	} else {
+		/* Old type of message */
+		ip_vs_process_message_v0(net, buffer, buflen);
+		return;
 	}
 }
 
@@ -511,8 +1181,10 @@
 {
 	struct net_device *dev;
 	struct inet_sock *inet = inet_sk(sk);
+	struct net *net = sock_net(sk);
 
-	if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+	dev = __dev_get_by_name(net, ifname);
+	if (!dev)
 		return -ENODEV;
 
 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
@@ -531,30 +1203,33 @@
  *	Set the maximum length of sync message according to the
  *	specified interface's MTU.
  */
-static int set_sync_mesg_maxlen(int sync_state)
+static int set_sync_mesg_maxlen(struct net *net, int sync_state)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct net_device *dev;
 	int num;
 
 	if (sync_state == IP_VS_STATE_MASTER) {
-		if ((dev = __dev_get_by_name(&init_net, ip_vs_master_mcast_ifn)) == NULL)
+		dev = __dev_get_by_name(net, ipvs->master_mcast_ifn);
+		if (!dev)
 			return -ENODEV;
 
 		num = (dev->mtu - sizeof(struct iphdr) -
 		       sizeof(struct udphdr) -
 		       SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE;
-		sync_send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
+		ipvs->send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
 			SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF);
 		IP_VS_DBG(7, "setting the maximum length of sync sending "
-			  "message %d.\n", sync_send_mesg_maxlen);
+			  "message %d.\n", ipvs->send_mesg_maxlen);
 	} else if (sync_state == IP_VS_STATE_BACKUP) {
-		if ((dev = __dev_get_by_name(&init_net, ip_vs_backup_mcast_ifn)) == NULL)
+		dev = __dev_get_by_name(net, ipvs->backup_mcast_ifn);
+		if (!dev)
 			return -ENODEV;
 
-		sync_recv_mesg_maxlen = dev->mtu -
+		ipvs->recv_mesg_maxlen = dev->mtu -
 			sizeof(struct iphdr) - sizeof(struct udphdr);
 		IP_VS_DBG(7, "setting the maximum length of sync receiving "
-			  "message %d.\n", sync_recv_mesg_maxlen);
+			  "message %d.\n", ipvs->recv_mesg_maxlen);
 	}
 
 	return 0;
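
For a concrete feel of the arithmetic in set_sync_mesg_maxlen(): the master subtracts the IP and UDP headers, the 4-byte sync header and an extra 20-byte margin from the interface MTU, then fits as many version 0 entries as possible (capped at 255). The numbers below assume a 1500-byte MTU and a 24-byte SIMPLE_CONN_SIZE, both of which are assumptions for illustration only:

#include <stdio.h>

int main(void)
{
	int mtu = 1500;			/* assumed Ethernet MTU */
	int iphdr = 20, udphdr = 8;	/* IPv4 + UDP header sizes */
	int hdr_len = 4;		/* SYNC_MESG_HEADER_LEN */
	int conn_size = 24;		/* assumed sizeof(struct ip_vs_sync_conn_v0) */
	int max_conns = 255;		/* MAX_CONNS_PER_SYNCBUFF */

	int num = (mtu - iphdr - udphdr - hdr_len - 20) / conn_size;
	int maxlen = hdr_len + conn_size * (num < max_conns ? num : max_conns);

	printf("num=%d send_mesg_maxlen=%d\n", num, maxlen);	/* 60, 1444 */
	return 0;
}
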
@@ -569,6 +1244,7 @@
 static int
 join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
 {
+	struct net *net = sock_net(sk);
 	struct ip_mreqn mreq;
 	struct net_device *dev;
 	int ret;
@@ -576,7 +1252,8 @@
 	memset(&mreq, 0, sizeof(mreq));
 	memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
 
-	if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+	dev = __dev_get_by_name(net, ifname);
+	if (!dev)
 		return -ENODEV;
 	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
 		return -EINVAL;
@@ -593,11 +1270,13 @@
 
 static int bind_mcastif_addr(struct socket *sock, char *ifname)
 {
+	struct net *net = sock_net(sock->sk);
 	struct net_device *dev;
 	__be32 addr;
 	struct sockaddr_in sin;
 
-	if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+	dev = __dev_get_by_name(net, ifname);
+	if (!dev)
 		return -ENODEV;
 
 	addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
@@ -619,19 +1298,20 @@
 /*
  *      Set up sending multicast socket over UDP
  */
-static struct socket * make_send_sock(void)
+static struct socket *make_send_sock(struct net *net)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct socket *sock;
 	int result;
 
 	/* First create a socket */
-	result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+	result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
 		return ERR_PTR(result);
 	}
 
-	result = set_mcast_if(sock->sk, ip_vs_master_mcast_ifn);
+	result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
 	if (result < 0) {
 		pr_err("Error setting outbound mcast interface\n");
 		goto error;
@@ -640,7 +1320,7 @@
 	set_mcast_loop(sock->sk, 0);
 	set_mcast_ttl(sock->sk, 1);
 
-	result = bind_mcastif_addr(sock, ip_vs_master_mcast_ifn);
+	result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn);
 	if (result < 0) {
 		pr_err("Error binding address of the mcast interface\n");
 		goto error;
@@ -664,13 +1344,14 @@
 /*
  *      Set up receiving multicast socket over UDP
  */
-static struct socket * make_receive_sock(void)
+static struct socket *make_receive_sock(struct net *net)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
 	struct socket *sock;
 	int result;
 
 	/* First create a socket */
-	result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+	result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
 	if (result < 0) {
 		pr_err("Error during creation of socket; terminating\n");
 		return ERR_PTR(result);
@@ -689,7 +1370,7 @@
 	/* join the multicast group */
 	result = join_mcast_group(sock->sk,
 			(struct in_addr *) &mcast_addr.sin_addr,
-			ip_vs_backup_mcast_ifn);
+			ipvs->backup_mcast_ifn);
 	if (result < 0) {
 		pr_err("Error joining to the multicast group\n");
 		goto error;
@@ -760,20 +1441,21 @@
 static int sync_thread_master(void *data)
 {
 	struct ip_vs_sync_thread_data *tinfo = data;
+	struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
 	struct ip_vs_sync_buff *sb;
 
 	pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
 		"syncid = %d\n",
-		ip_vs_master_mcast_ifn, ip_vs_master_syncid);
+		ipvs->master_mcast_ifn, ipvs->master_syncid);
 
 	while (!kthread_should_stop()) {
-		while ((sb = sb_dequeue())) {
+		while ((sb = sb_dequeue(ipvs))) {
 			ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
 			ip_vs_sync_buff_release(sb);
 		}
 
-		/* check if entries stay in curr_sb for 2 seconds */
-		sb = get_curr_sync_buff(2 * HZ);
+		/* check if entries stay in ipvs->sync_buff for 2 seconds */
+		sb = get_curr_sync_buff(ipvs, 2 * HZ);
 		if (sb) {
 			ip_vs_send_sync_msg(tinfo->sock, sb->mesg);
 			ip_vs_sync_buff_release(sb);
@@ -783,14 +1465,13 @@
 	}
 
 	/* clean up the sync_buff queue */
-	while ((sb=sb_dequeue())) {
+	while ((sb = sb_dequeue(ipvs)))
 		ip_vs_sync_buff_release(sb);
-	}
 
 	/* clean up the current sync_buff */
-	if ((sb = get_curr_sync_buff(0))) {
+	sb = get_curr_sync_buff(ipvs, 0);
+	if (sb)
 		ip_vs_sync_buff_release(sb);
-	}
 
 	/* release the sending multicast socket */
 	sock_release(tinfo->sock);
@@ -803,11 +1484,12 @@
 static int sync_thread_backup(void *data)
 {
 	struct ip_vs_sync_thread_data *tinfo = data;
+	struct netns_ipvs *ipvs = net_ipvs(tinfo->net);
 	int len;
 
 	pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
 		"syncid = %d\n",
-		ip_vs_backup_mcast_ifn, ip_vs_backup_syncid);
+		ipvs->backup_mcast_ifn, ipvs->backup_syncid);
 
 	while (!kthread_should_stop()) {
 		wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
@@ -817,7 +1499,7 @@
 		/* do we have data now? */
 		while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
 			len = ip_vs_receive(tinfo->sock, tinfo->buf,
-					sync_recv_mesg_maxlen);
+					ipvs->recv_mesg_maxlen);
 			if (len <= 0) {
 				pr_err("receiving message error\n");
 				break;
@@ -826,7 +1508,7 @@
 			/* disable bottom half, because it accesses the data
 			   shared by softirq while getting/creating conns */
 			local_bh_disable();
-			ip_vs_process_message(tinfo->buf, len);
+			ip_vs_process_message(tinfo->net, tinfo->buf, len);
 			local_bh_enable();
 		}
 	}
@@ -840,41 +1522,42 @@
 }
 
 
-int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
+int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid)
 {
 	struct ip_vs_sync_thread_data *tinfo;
 	struct task_struct **realtask, *task;
 	struct socket *sock;
+	struct netns_ipvs *ipvs = net_ipvs(net);
 	char *name, *buf = NULL;
 	int (*threadfn)(void *data);
 	int result = -ENOMEM;
 
 	IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
 	IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
-		  sizeof(struct ip_vs_sync_conn));
+		  sizeof(struct ip_vs_sync_conn_v0));
 
 	if (state == IP_VS_STATE_MASTER) {
-		if (sync_master_thread)
+		if (ipvs->master_thread)
 			return -EEXIST;
 
-		strlcpy(ip_vs_master_mcast_ifn, mcast_ifn,
-			sizeof(ip_vs_master_mcast_ifn));
-		ip_vs_master_syncid = syncid;
-		realtask = &sync_master_thread;
-		name = "ipvs_syncmaster";
+		strlcpy(ipvs->master_mcast_ifn, mcast_ifn,
+			sizeof(ipvs->master_mcast_ifn));
+		ipvs->master_syncid = syncid;
+		realtask = &ipvs->master_thread;
+		name = "ipvs_master:%d";
 		threadfn = sync_thread_master;
-		sock = make_send_sock();
+		sock = make_send_sock(net);
 	} else if (state == IP_VS_STATE_BACKUP) {
-		if (sync_backup_thread)
+		if (ipvs->backup_thread)
 			return -EEXIST;
 
-		strlcpy(ip_vs_backup_mcast_ifn, mcast_ifn,
-			sizeof(ip_vs_backup_mcast_ifn));
-		ip_vs_backup_syncid = syncid;
-		realtask = &sync_backup_thread;
-		name = "ipvs_syncbackup";
+		strlcpy(ipvs->backup_mcast_ifn, mcast_ifn,
+			sizeof(ipvs->backup_mcast_ifn));
+		ipvs->backup_syncid = syncid;
+		realtask = &ipvs->backup_thread;
+		name = "ipvs_backup:%d";
 		threadfn = sync_thread_backup;
-		sock = make_receive_sock();
+		sock = make_receive_sock(net);
 	} else {
 		return -EINVAL;
 	}
@@ -884,9 +1567,9 @@
 		goto out;
 	}
 
-	set_sync_mesg_maxlen(state);
+	set_sync_mesg_maxlen(net, state);
 	if (state == IP_VS_STATE_BACKUP) {
-		buf = kmalloc(sync_recv_mesg_maxlen, GFP_KERNEL);
+		buf = kmalloc(ipvs->recv_mesg_maxlen, GFP_KERNEL);
 		if (!buf)
 			goto outsocket;
 	}
@@ -895,10 +1578,11 @@
 	if (!tinfo)
 		goto outbuf;
 
+	tinfo->net = net;
 	tinfo->sock = sock;
 	tinfo->buf = buf;
 
-	task = kthread_run(threadfn, tinfo, name);
+	task = kthread_run(threadfn, tinfo, name, ipvs->gen);
 	if (IS_ERR(task)) {
 		result = PTR_ERR(task);
 		goto outtinfo;
@@ -906,7 +1590,7 @@
 
 	/* mark as active */
 	*realtask = task;
-	ip_vs_sync_state |= state;
+	ipvs->sync_state |= state;
 
 	/* increase the module use count */
 	ip_vs_use_count_inc();
@@ -924,16 +1608,18 @@
 }
 
 
-int stop_sync_thread(int state)
+int stop_sync_thread(struct net *net, int state)
 {
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
 	IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
 
 	if (state == IP_VS_STATE_MASTER) {
-		if (!sync_master_thread)
+		if (!ipvs->master_thread)
 			return -ESRCH;
 
 		pr_info("stopping master sync thread %d ...\n",
-			task_pid_nr(sync_master_thread));
+			task_pid_nr(ipvs->master_thread));
 
 		/*
 		 * The lock synchronizes with sb_queue_tail(), so that we don't
@@ -941,21 +1627,21 @@
 		 * progress of stopping the master sync daemon.
 		 */
 
-		spin_lock_bh(&ip_vs_sync_lock);
-		ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
-		spin_unlock_bh(&ip_vs_sync_lock);
-		kthread_stop(sync_master_thread);
-		sync_master_thread = NULL;
+		spin_lock_bh(&ipvs->sync_lock);
+		ipvs->sync_state &= ~IP_VS_STATE_MASTER;
+		spin_unlock_bh(&ipvs->sync_lock);
+		kthread_stop(ipvs->master_thread);
+		ipvs->master_thread = NULL;
 	} else if (state == IP_VS_STATE_BACKUP) {
-		if (!sync_backup_thread)
+		if (!ipvs->backup_thread)
 			return -ESRCH;
 
 		pr_info("stopping backup sync thread %d ...\n",
-			task_pid_nr(sync_backup_thread));
+			task_pid_nr(ipvs->backup_thread));
 
-		ip_vs_sync_state &= ~IP_VS_STATE_BACKUP;
-		kthread_stop(sync_backup_thread);
-		sync_backup_thread = NULL;
+		ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
+		kthread_stop(ipvs->backup_thread);
+		ipvs->backup_thread = NULL;
 	} else {
 		return -EINVAL;
 	}
@@ -965,3 +1651,42 @@
 
 	return 0;
 }
+
+/*
+ * Initialize data struct for each netns
+ */
+static int __net_init __ip_vs_sync_init(struct net *net)
+{
+	struct netns_ipvs *ipvs = net_ipvs(net);
+
+	INIT_LIST_HEAD(&ipvs->sync_queue);
+	spin_lock_init(&ipvs->sync_lock);
+	spin_lock_init(&ipvs->sync_buff_lock);
+
+	ipvs->sync_mcast_addr.sin_family = AF_INET;
+	ipvs->sync_mcast_addr.sin_port = cpu_to_be16(IP_VS_SYNC_PORT);
+	ipvs->sync_mcast_addr.sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP);
+	return 0;
+}
+
+static void __ip_vs_sync_cleanup(struct net *net)
+{
+	stop_sync_thread(net, IP_VS_STATE_MASTER);
+	stop_sync_thread(net, IP_VS_STATE_BACKUP);
+}
+
+static struct pernet_operations ipvs_sync_ops = {
+	.init = __ip_vs_sync_init,
+	.exit = __ip_vs_sync_cleanup,
+};
+
+
+int __init ip_vs_sync_init(void)
+{
+	return register_pernet_subsys(&ipvs_sync_ops);
+}
+
+void ip_vs_sync_cleanup(void)
+{
+	unregister_pernet_subsys(&ipvs_sync_ops);
+}
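
The new __ip_vs_sync_init()/__ip_vs_sync_cleanup() pair follows the standard pernet_operations pattern; ip_vs_sync_init() and ip_vs_sync_cleanup() are presumably wired into the IPVS core's module init and exit paths. A generic sketch of that pattern, not specific to this patch:

#include <linux/module.h>
#include <net/net_namespace.h>

static int __net_init example_net_init(struct net *net)
{
	/* allocate and initialize per-namespace state here */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* tear down per-namespace state here */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

static int __init example_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}

static void __exit example_exit(void)
{
	unregister_pernet_subsys(&example_net_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
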
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 5325a3f..1f2a4e3 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -175,7 +175,6 @@
 			.fl4_tos = RT_TOS(iph->tos),
 			.mark = skb->mark,
 		};
-		struct rtable *rt;
 
 		if (ip_route_output_key(net, &rt, &fl))
 			return 0;
@@ -390,7 +389,8 @@
 
 	/* MTU checking */
 	mtu = dst_mtu(&rt->dst);
-	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
+	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
+	    !skb_is_gso(skb)) {
 		ip_rt_put(rt);
 		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
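
The recurring change in ip_vs_xmit.c is to exempt GSO skbs from the local MTU check, since such packets are segmented later on the output path; every transmitter repeats the same skb->len > mtu && !skb_is_gso(skb) test (plus the DF bit for IPv4). A hypothetical helper, not part of this patch, that expresses the shared condition:

#include <linux/ip.h>
#include <linux/skbuff.h>

/* Hypothetical helper: true when the packet cannot be sent as-is and an
 * ICMP_FRAG_NEEDED reply is required instead. */
static bool frag_needed_sketch(const struct sk_buff *skb,
			       const struct iphdr *iph, unsigned int mtu)
{
	return skb->len > mtu &&
	       (iph->frag_off & htons(IP_DF)) &&
	       !skb_is_gso(skb);
}
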
@@ -443,7 +443,7 @@
 
 	/* MTU checking */
 	mtu = dst_mtu(&rt->dst);
-	if (skb->len > mtu) {
+	if (skb->len > mtu && !skb_is_gso(skb)) {
 		if (!skb->dev) {
 			struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -543,7 +543,8 @@
 
 	/* MTU checking */
 	mtu = dst_mtu(&rt->dst);
-	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
+	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
+	    !skb_is_gso(skb)) {
 		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
 		IP_VS_DBG_RL_PKT(0, AF_INET, pp, skb, 0,
 				 "ip_vs_nat_xmit(): frag needed for");
@@ -658,7 +659,7 @@
 
 	/* MTU checking */
 	mtu = dst_mtu(&rt->dst);
-	if (skb->len > mtu) {
+	if (skb->len > mtu && !skb_is_gso(skb)) {
 		if (!skb->dev) {
 			struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -773,8 +774,8 @@
 
 	df |= (old_iph->frag_off & htons(IP_DF));
 
-	if ((old_iph->frag_off & htons(IP_DF))
-	    && mtu < ntohs(old_iph->tot_len)) {
+	if ((old_iph->frag_off & htons(IP_DF)) &&
+	    mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb)) {
 		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error_put;
@@ -886,7 +887,8 @@
 	if (skb_dst(skb))
 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
 
-	if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
+	if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) &&
+	    !skb_is_gso(skb)) {
 		if (!skb->dev) {
 			struct net *net = dev_net(skb_dst(skb)->dev);
 
@@ -991,7 +993,8 @@
 
 	/* MTU checking */
 	mtu = dst_mtu(&rt->dst);
-	if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
+	if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu &&
+	    !skb_is_gso(skb)) {
 		icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
 		ip_rt_put(rt);
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
@@ -1158,7 +1161,8 @@
 
 	/* MTU checking */
 	mtu = dst_mtu(&rt->dst);
-	if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
+	if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF)) &&
+	    !skb_is_gso(skb)) {
 		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
 		goto tx_error_put;
@@ -1272,7 +1276,7 @@
 
 	/* MTU checking */
 	mtu = dst_mtu(&rt->dst);
-	if (skb->len > mtu) {
+	if (skb->len > mtu && !skb_is_gso(skb)) {
 		if (!skb->dev) {
 			struct net *net = dev_net(skb_dst(skb)->dev);
 
diff --git a/net/netfilter/nf_conntrack_broadcast.c b/net/netfilter/nf_conntrack_broadcast.c
new file mode 100644
index 0000000..4e99cca
--- /dev/null
+++ b/net/netfilter/nf_conntrack_broadcast.c
@@ -0,0 +1,82 @@
+/*
+ *      broadcast connection tracking helper
+ *
+ *      (c) 2005 Patrick McHardy <kaber@trash.net>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <net/route.h>
+#include <linux/inetdevice.h>
+#include <linux/skbuff.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+int nf_conntrack_broadcast_help(struct sk_buff *skb,
+				unsigned int protoff,
+				struct nf_conn *ct,
+				enum ip_conntrack_info ctinfo,
+				unsigned int timeout)
+{
+	struct nf_conntrack_expect *exp;
+	struct iphdr *iph = ip_hdr(skb);
+	struct rtable *rt = skb_rtable(skb);
+	struct in_device *in_dev;
+	struct nf_conn_help *help = nfct_help(ct);
+	__be32 mask = 0;
+
+	/* we're only interested in locally generated packets */
+	if (skb->sk == NULL)
+		goto out;
+	if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
+		goto out;
+	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+		goto out;
+
+	rcu_read_lock();
+	in_dev = __in_dev_get_rcu(rt->dst.dev);
+	if (in_dev != NULL) {
+		for_primary_ifa(in_dev) {
+			if (ifa->ifa_broadcast == iph->daddr) {
+				mask = ifa->ifa_mask;
+				break;
+			}
+		} endfor_ifa(in_dev);
+	}
+	rcu_read_unlock();
+
+	if (mask == 0)
+		goto out;
+
+	exp = nf_ct_expect_alloc(ct);
+	if (exp == NULL)
+		goto out;
+
+	exp->tuple                = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+	exp->tuple.src.u.udp.port = help->helper->tuple.src.u.udp.port;
+
+	exp->mask.src.u3.ip       = mask;
+	exp->mask.src.u.udp.port  = htons(0xFFFF);
+
+	exp->expectfn             = NULL;
+	exp->flags                = NF_CT_EXPECT_PERMANENT;
+	exp->class		  = NF_CT_EXPECT_CLASS_DEFAULT;
+	exp->helper               = NULL;
+
+	nf_ct_expect_related(exp);
+	nf_ct_expect_put(exp);
+
+	nf_ct_refresh(ct, skb, timeout * HZ);
+out:
+	return NF_ACCEPT;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_broadcast_help);
+
+MODULE_LICENSE("GPL");
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index e615119..2f454ef 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -43,6 +43,7 @@
 #include <net/netfilter/nf_conntrack_acct.h>
 #include <net/netfilter/nf_conntrack_ecache.h>
 #include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/nf_nat_core.h>
 
@@ -282,6 +283,11 @@
 static void death_by_timeout(unsigned long ul_conntrack)
 {
 	struct nf_conn *ct = (void *)ul_conntrack;
+	struct nf_conn_tstamp *tstamp;
+
+	tstamp = nf_conn_tstamp_find(ct);
+	if (tstamp && tstamp->stop == 0)
+		tstamp->stop = ktime_to_ns(ktime_get_real());
 
 	if (!test_bit(IPS_DYING_BIT, &ct->status) &&
 	    unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
@@ -419,6 +425,7 @@
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
 	struct nf_conn_help *help;
+	struct nf_conn_tstamp *tstamp;
 	struct hlist_nulls_node *n;
 	enum ip_conntrack_info ctinfo;
 	struct net *net;
@@ -486,8 +493,16 @@
 	ct->timeout.expires += jiffies;
 	add_timer(&ct->timeout);
 	atomic_inc(&ct->ct_general.use);
-	set_bit(IPS_CONFIRMED_BIT, &ct->status);
+	ct->status |= IPS_CONFIRMED;
 
+	/* set conntrack timestamp, if enabled. */
+	tstamp = nf_conn_tstamp_find(ct);
+	if (tstamp) {
+		if (skb->tstamp.tv64 == 0)
+			__net_timestamp((struct sk_buff *)skb);
+
+		tstamp->start = ktime_to_ns(skb->tstamp);
+	}
 	/* Since the lookup is lockless, hash insertion must be done after
 	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
 	 * guarantee that no other CPU can find the conntrack before the above
@@ -655,7 +670,8 @@
 	 * and ct->tuplehash[IP_CT_DIR_REPLY].hnnode.next unchanged.
 	 */
 	memset(&ct->tuplehash[IP_CT_DIR_MAX], 0,
-	       sizeof(*ct) - offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
+	       offsetof(struct nf_conn, proto) -
+	       offsetof(struct nf_conn, tuplehash[IP_CT_DIR_MAX]));
 	spin_lock_init(&ct->lock);
 	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
 	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
@@ -745,6 +761,7 @@
 	}
 
 	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
 
 	ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
 	nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
@@ -942,8 +959,15 @@
 	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
 		nf_conntrack_event_cache(IPCT_REPLY, ct);
 out:
-	if (tmpl)
-		nf_ct_put(tmpl);
+	if (tmpl) {
+		/* Special case: we have to repeat this hook, assign the
+		 * template again to this packet. We assume that this packet
+		 * has no conntrack assigned. This is used by nf_ct_tcp. */
+		if (ret == NF_REPEAT)
+			skb->nfct = (struct nf_conntrack *)tmpl;
+		else
+			nf_ct_put(tmpl);
+	}
 
 	return ret;
 }
@@ -1185,6 +1209,11 @@
 static int kill_report(struct nf_conn *i, void *data)
 {
 	struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
+	struct nf_conn_tstamp *tstamp;
+
+	tstamp = nf_conn_tstamp_find(i);
+	if (tstamp && tstamp->stop == 0)
+		tstamp->stop = ktime_to_ns(ktime_get_real());
 
 	/* If we fail to deliver the event, death_by_timeout() will retry */
 	if (nf_conntrack_event_report(IPCT_DESTROY, i,
@@ -1201,9 +1230,9 @@
 	return 1;
 }
 
-void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size)
+void nf_ct_free_hashtable(void *hash, unsigned int size)
 {
-	if (vmalloced)
+	if (is_vmalloc_addr(hash))
 		vfree(hash);
 	else
 		free_pages((unsigned long)hash,
@@ -1270,8 +1299,7 @@
 		goto i_see_dead_people;
 	}
 
-	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-			     net->ct.htable_size);
+	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
 	nf_conntrack_ecache_fini(net);
 	nf_conntrack_acct_fini(net);
 	nf_conntrack_expect_fini(net);
@@ -1300,21 +1328,18 @@
 	}
 }
 
-void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls)
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
 {
 	struct hlist_nulls_head *hash;
 	unsigned int nr_slots, i;
 	size_t sz;
 
-	*vmalloced = 0;
-
 	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
 	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
 	sz = nr_slots * sizeof(struct hlist_nulls_head);
 	hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
 					get_order(sz));
 	if (!hash) {
-		*vmalloced = 1;
 		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
 		hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
 				 PAGE_KERNEL);
@@ -1330,7 +1355,7 @@
 
 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
 {
-	int i, bucket, vmalloced, old_vmalloced;
+	int i, bucket;
 	unsigned int hashsize, old_size;
 	struct hlist_nulls_head *hash, *old_hash;
 	struct nf_conntrack_tuple_hash *h;
@@ -1347,7 +1372,7 @@
 	if (!hashsize)
 		return -EINVAL;
 
-	hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1);
+	hash = nf_ct_alloc_hashtable(&hashsize, 1);
 	if (!hash)
 		return -ENOMEM;
 
@@ -1369,15 +1394,13 @@
 		}
 	}
 	old_size = init_net.ct.htable_size;
-	old_vmalloced = init_net.ct.hash_vmalloc;
 	old_hash = init_net.ct.hash;
 
 	init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
-	init_net.ct.hash_vmalloc = vmalloced;
 	init_net.ct.hash = hash;
 	spin_unlock_bh(&nf_conntrack_lock);
 
-	nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
+	nf_ct_free_hashtable(old_hash, old_size);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
@@ -1490,8 +1513,7 @@
 	}
 
 	net->ct.htable_size = nf_conntrack_htable_size;
-	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
-					     &net->ct.hash_vmalloc, 1);
+	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
 	if (!net->ct.hash) {
 		ret = -ENOMEM;
 		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
@@ -1503,6 +1525,9 @@
 	ret = nf_conntrack_acct_init(net);
 	if (ret < 0)
 		goto err_acct;
+	ret = nf_conntrack_tstamp_init(net);
+	if (ret < 0)
+		goto err_tstamp;
 	ret = nf_conntrack_ecache_init(net);
 	if (ret < 0)
 		goto err_ecache;
@@ -1510,12 +1535,13 @@
 	return 0;
 
 err_ecache:
+	nf_conntrack_tstamp_fini(net);
+err_tstamp:
 	nf_conntrack_acct_fini(net);
 err_acct:
 	nf_conntrack_expect_fini(net);
 err_expect:
-	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
-			     net->ct.htable_size);
+	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
 err_hash:
 	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
 err_cache:
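
nf_ct_alloc_hashtable() rounds the requested bucket count up to a whole number of pages worth of hlist_nulls_head slots before allocating, which is why the resulting table can be larger than what the caller asked for. A worked sketch of that arithmetic, assuming 4 KiB pages and 8-byte list heads (both values are platform assumptions, not taken from a header):

#include <stdio.h>

#define ASSUMED_PAGE_SIZE 4096u  /* assumption: common 4 KiB page */
#define ASSUMED_HEAD_SIZE 8u     /* assumption: sizeof(struct hlist_nulls_head) on 64-bit */

/* roundup(x, y): smallest multiple of y that is >= x */
static unsigned int roundup_to(unsigned int x, unsigned int y)
{
	return ((x + y - 1) / y) * y;
}

int main(void)
{
	unsigned int requested = 1000;
	unsigned int per_page  = ASSUMED_PAGE_SIZE / ASSUMED_HEAD_SIZE; /* 512 slots */
	unsigned int nr_slots  = roundup_to(requested, per_page);       /* 1024 slots */

	printf("%u buckets requested -> %u slots, %u bytes\n",
	       requested, nr_slots, nr_slots * ASSUMED_HEAD_SIZE);
	return 0;
}
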
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 5702de3..63a1b91 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -63,6 +63,9 @@
 		 * this does not harm and it happens very rarely. */
 		unsigned long missed = e->missed;
 
+		if (!((events | missed) & e->ctmask))
+			goto out_unlock;
+
 		ret = notify->fcn(events | missed, &item);
 		if (unlikely(ret < 0 || missed)) {
 			spin_lock_bh(&ct->lock);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index a20fb0b..cd1e8e0 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -319,7 +319,8 @@
 	const struct nf_conntrack_expect_policy *p;
 	unsigned int h = nf_ct_expect_dst_hash(&exp->tuple);
 
-	atomic_inc(&exp->use);
+	/* two references : one for hash insert, one for the timer */
+	atomic_add(2, &exp->use);
 
 	if (master_help) {
 		hlist_add_head(&exp->lnode, &master_help->expectations);
@@ -333,12 +334,14 @@
 	setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
 		    (unsigned long)exp);
 	if (master_help) {
-		p = &master_help->helper->expect_policy[exp->class];
+		p = &rcu_dereference_protected(
+				master_help->helper,
+				lockdep_is_held(&nf_conntrack_lock)
+				)->expect_policy[exp->class];
 		exp->timeout.expires = jiffies + p->timeout * HZ;
 	}
 	add_timer(&exp->timeout);
 
-	atomic_inc(&exp->use);
 	NF_CT_STAT_INC(net, expect_create);
 }
 
@@ -369,7 +372,10 @@
 	if (!del_timer(&i->timeout))
 		return 0;
 
-	p = &master_help->helper->expect_policy[i->class];
+	p = &rcu_dereference_protected(
+		master_help->helper,
+		lockdep_is_held(&nf_conntrack_lock)
+		)->expect_policy[i->class];
 	i->timeout.expires = jiffies + p->timeout * HZ;
 	add_timer(&i->timeout);
 	return 1;
@@ -407,7 +413,10 @@
 	}
 	/* Will be over limit? */
 	if (master_help) {
-		p = &master_help->helper->expect_policy[expect->class];
+		p = &rcu_dereference_protected(
+			master_help->helper,
+			lockdep_is_held(&nf_conntrack_lock)
+			)->expect_policy[expect->class];
 		if (p->max_expected &&
 		    master_help->expecting[expect->class] >= p->max_expected) {
 			evict_oldest_expect(master, expect);
@@ -478,7 +487,7 @@
 	struct hlist_node *n;
 
 	for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
-		n = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+		n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
 		if (n)
 			return n;
 	}
@@ -491,11 +500,11 @@
 	struct net *net = seq_file_net(seq);
 	struct ct_expect_iter_state *st = seq->private;
 
-	head = rcu_dereference(head->next);
+	head = rcu_dereference(hlist_next_rcu(head));
 	while (head == NULL) {
 		if (++st->bucket >= nf_ct_expect_hsize)
 			return NULL;
-		head = rcu_dereference(net->ct.expect_hash[st->bucket].first);
+		head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
 	}
 	return head;
 }
@@ -630,8 +639,7 @@
 	}
 
 	net->ct.expect_count = 0;
-	net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize,
-						  &net->ct.expect_vmalloc, 0);
+	net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
 	if (net->ct.expect_hash == NULL)
 		goto err1;
 
@@ -653,8 +661,7 @@
 	if (net_eq(net, &init_net))
 		kmem_cache_destroy(nf_ct_expect_cachep);
 err2:
-	nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
-			     nf_ct_expect_hsize);
+	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
 err1:
 	return err;
 }
@@ -666,6 +673,5 @@
 		rcu_barrier(); /* Wait for call_rcu() before destroy */
 		kmem_cache_destroy(nf_ct_expect_cachep);
 	}
-	nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc,
-			     nf_ct_expect_hsize);
+	nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize);
 }
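
The nf_ct_expect_insert() change above takes both new references in one atomic_add(2, ...): one is owned by the hash table, the other by the pending timeout timer, so whichever side goes away last frees the expectation. A compressed userspace sketch of that ownership split, with hypothetical names and a plain C11 atomic in place of the kernel's refcounting:

#include <stdio.h>
#include <stdatomic.h>

struct expectation {
	atomic_int use;  /* reference count */
};

static void exp_put(struct expectation *exp)
{
	if (atomic_fetch_sub(&exp->use, 1) == 1)
		printf("expectation freed\n");
}

static void exp_insert(struct expectation *exp)
{
	/* two references: one for the hash insert, one for the timer */
	atomic_fetch_add(&exp->use, 2);
}

int main(void)
{
	struct expectation exp = { .use = 1 };  /* allocator's reference */

	exp_insert(&exp);
	exp_put(&exp);  /* caller drops its reference (nf_ct_expect_put) */
	exp_put(&exp);  /* timer expires and drops the timer's reference */
	exp_put(&exp);  /* hash removal drops the last one -> freed */
	return 0;
}
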
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index bd82450..80a23ed 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -140,15 +140,16 @@
 	/* This assumes that extended areas in conntrack for the types
 	   whose NF_CT_EXT_F_PREALLOC bit set are allocated in order */
 	for (i = min; i <= max; i++) {
-		t1 = nf_ct_ext_types[i];
+		t1 = rcu_dereference_protected(nf_ct_ext_types[i],
+				lockdep_is_held(&nf_ct_ext_type_mutex));
 		if (!t1)
 			continue;
 
-		t1->alloc_size = sizeof(struct nf_ct_ext)
-				 + ALIGN(sizeof(struct nf_ct_ext), t1->align)
-				 + t1->len;
+		t1->alloc_size = ALIGN(sizeof(struct nf_ct_ext), t1->align) +
+				 t1->len;
 		for (j = 0; j < NF_CT_EXT_NUM; j++) {
-			t2 = nf_ct_ext_types[j];
+			t2 = rcu_dereference_protected(nf_ct_ext_types[j],
+				lockdep_is_held(&nf_ct_ext_type_mutex));
 			if (t2 == NULL || t2 == t1 ||
 			    (t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
 				continue;
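
The recomputed alloc_size above drops the double-counted base structure: it is now the base struct nf_ct_ext rounded up to the extension's alignment, plus the extension's own length. A short sketch of that ALIGN arithmetic (the 16-byte base used here is only an illustrative assumption):

#include <stdio.h>

/* ALIGN(x, a): round x up to the next multiple of a (a is a power of two). */
static unsigned long align_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned long base  = 16;  /* assumption: sizeof(struct nf_ct_ext) */
	unsigned long align = 8;   /* the extension's __alignof__ */
	unsigned long len   = 20;  /* the extension's payload length */

	printf("alloc_size = %lu\n", align_up(base, align) + len);  /* 16 + 20 = 36 */
	return 0;
}
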
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 59e1a4c..1bdfea3 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -33,7 +33,6 @@
 static struct hlist_head *nf_ct_helper_hash __read_mostly;
 static unsigned int nf_ct_helper_hsize __read_mostly;
 static unsigned int nf_ct_helper_count __read_mostly;
-static int nf_ct_helper_vmalloc;
 
 
 /* Stupid hash, but collision free for the default registrations of the
@@ -158,7 +157,10 @@
 	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(i);
 	struct nf_conn_help *help = nfct_help(ct);
 
-	if (help && help->helper == me) {
+	if (help && rcu_dereference_protected(
+			help->helper,
+			lockdep_is_held(&nf_conntrack_lock)
+			) == me) {
 		nf_conntrack_event(IPCT_HELPER, ct);
 		rcu_assign_pointer(help->helper, NULL);
 	}
@@ -210,7 +212,10 @@
 		hlist_for_each_entry_safe(exp, n, next,
 					  &net->ct.expect_hash[i], hnode) {
 			struct nf_conn_help *help = nfct_help(exp->master);
-			if ((help->helper == me || exp->helper == me) &&
+			if ((rcu_dereference_protected(
+					help->helper,
+					lockdep_is_held(&nf_conntrack_lock)
+					) == me || exp->helper == me) &&
 			    del_timer(&exp->timeout)) {
 				nf_ct_unlink_expect(exp);
 				nf_ct_expect_put(exp);
@@ -261,8 +266,7 @@
 	int err;
 
 	nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
-	nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize,
-						  &nf_ct_helper_vmalloc, 0);
+	nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0);
 	if (!nf_ct_helper_hash)
 		return -ENOMEM;
 
@@ -273,14 +277,12 @@
 	return 0;
 
 err1:
-	nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
-			     nf_ct_helper_hsize);
+	nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
 	return err;
 }
 
 void nf_conntrack_helper_fini(void)
 {
 	nf_ct_extend_unregister(&helper_extend);
-	nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc,
-			     nf_ct_helper_hsize);
+	nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize);
 }
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c
index aadde01..4c8f30a 100644
--- a/net/netfilter/nf_conntrack_netbios_ns.c
+++ b/net/netfilter/nf_conntrack_netbios_ns.c
@@ -18,14 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/if_addr.h>
 #include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/netfilter.h>
-#include <net/route.h>
 
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_helper.h>
@@ -40,75 +33,26 @@
 MODULE_ALIAS_NFCT_HELPER("netbios_ns");
 
 static unsigned int timeout __read_mostly = 3;
-module_param(timeout, uint, 0400);
+module_param(timeout, uint, S_IRUSR);
 MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
 
-static int help(struct sk_buff *skb, unsigned int protoff,
-		struct nf_conn *ct, enum ip_conntrack_info ctinfo)
-{
-	struct nf_conntrack_expect *exp;
-	struct iphdr *iph = ip_hdr(skb);
-	struct rtable *rt = skb_rtable(skb);
-	struct in_device *in_dev;
-	__be32 mask = 0;
-
-	/* we're only interested in locally generated packets */
-	if (skb->sk == NULL)
-		goto out;
-	if (rt == NULL || !(rt->rt_flags & RTCF_BROADCAST))
-		goto out;
-	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
-		goto out;
-
-	rcu_read_lock();
-	in_dev = __in_dev_get_rcu(rt->dst.dev);
-	if (in_dev != NULL) {
-		for_primary_ifa(in_dev) {
-			if (ifa->ifa_broadcast == iph->daddr) {
-				mask = ifa->ifa_mask;
-				break;
-			}
-		} endfor_ifa(in_dev);
-	}
-	rcu_read_unlock();
-
-	if (mask == 0)
-		goto out;
-
-	exp = nf_ct_expect_alloc(ct);
-	if (exp == NULL)
-		goto out;
-
-	exp->tuple                = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
-	exp->tuple.src.u.udp.port = htons(NMBD_PORT);
-
-	exp->mask.src.u3.ip       = mask;
-	exp->mask.src.u.udp.port  = htons(0xFFFF);
-
-	exp->expectfn             = NULL;
-	exp->flags                = NF_CT_EXPECT_PERMANENT;
-	exp->class		  = NF_CT_EXPECT_CLASS_DEFAULT;
-	exp->helper               = NULL;
-
-	nf_ct_expect_related(exp);
-	nf_ct_expect_put(exp);
-
-	nf_ct_refresh(ct, skb, timeout * HZ);
-out:
-	return NF_ACCEPT;
-}
-
 static struct nf_conntrack_expect_policy exp_policy = {
 	.max_expected	= 1,
 };
 
+static int netbios_ns_help(struct sk_buff *skb, unsigned int protoff,
+		   struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+	return nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout);
+}
+
 static struct nf_conntrack_helper helper __read_mostly = {
 	.name			= "netbios-ns",
-	.tuple.src.l3num	= AF_INET,
+	.tuple.src.l3num	= NFPROTO_IPV4,
 	.tuple.src.u.udp.port	= cpu_to_be16(NMBD_PORT),
 	.tuple.dst.protonum	= IPPROTO_UDP,
 	.me			= THIS_MODULE,
-	.help			= help,
+	.help			= netbios_ns_help,
 	.expect_policy		= &exp_policy,
 };
 
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 2b7eef3..30bf8a1 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -42,6 +42,7 @@
 #include <net/netfilter/nf_conntrack_tuple.h>
 #include <net/netfilter/nf_conntrack_acct.h>
 #include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
 #ifdef CONFIG_NF_NAT_NEEDED
 #include <net/netfilter/nf_nat_core.h>
 #include <net/netfilter/nf_nat_protocol.h>
@@ -230,6 +231,33 @@
 	return -1;
 }
 
+static int
+ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
+{
+	struct nlattr *nest_count;
+	const struct nf_conn_tstamp *tstamp;
+
+	tstamp = nf_conn_tstamp_find(ct);
+	if (!tstamp)
+		return 0;
+
+	nest_count = nla_nest_start(skb, CTA_TIMESTAMP | NLA_F_NESTED);
+	if (!nest_count)
+		goto nla_put_failure;
+
+	NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start));
+	if (tstamp->stop != 0) {
+		NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP,
+			     cpu_to_be64(tstamp->stop));
+	}
+	nla_nest_end(skb, nest_count);
+
+	return 0;
+
+nla_put_failure:
+	return -1;
+}
+
 #ifdef CONFIG_NF_CONNTRACK_MARK
 static inline int
 ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
@@ -404,6 +432,7 @@
 	    ctnetlink_dump_timeout(skb, ct) < 0 ||
 	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
 	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+	    ctnetlink_dump_timestamp(skb, ct) < 0 ||
 	    ctnetlink_dump_protoinfo(skb, ct) < 0 ||
 	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
 	    ctnetlink_dump_mark(skb, ct) < 0 ||
@@ -471,6 +500,18 @@
 }
 
 static inline size_t
+ctnetlink_timestamp_size(const struct nf_conn *ct)
+{
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+	if (!nf_ct_ext_exist(ct, NF_CT_EXT_TSTAMP))
+		return 0;
+	return nla_total_size(0) + 2 * nla_total_size(sizeof(uint64_t));
+#else
+	return 0;
+#endif
+}
+
+static inline size_t
 ctnetlink_nlmsg_size(const struct nf_conn *ct)
 {
 	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
@@ -481,6 +522,7 @@
 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
 	       + ctnetlink_counters_size(ct)
+	       + ctnetlink_timestamp_size(ct)
 	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
 	       + nla_total_size(0) /* CTA_PROTOINFO */
 	       + nla_total_size(0) /* CTA_HELP */
@@ -571,7 +613,8 @@
 
 	if (events & (1 << IPCT_DESTROY)) {
 		if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
-		    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
+		    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
+		    ctnetlink_dump_timestamp(skb, ct) < 0)
 			goto nla_put_failure;
 	} else {
 		if (ctnetlink_dump_timeout(skb, ct) < 0)
@@ -667,6 +710,7 @@
 			if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
 						cb->nlh->nlmsg_seq,
 						IPCTNL_MSG_CT_NEW, ct) < 0) {
+				nf_conntrack_get(&ct->ct_general);
 				cb->args[1] = (unsigned long)ct;
 				goto out;
 			}
@@ -760,7 +804,7 @@
 static int
 ctnetlink_parse_tuple(const struct nlattr * const cda[],
 		      struct nf_conntrack_tuple *tuple,
-		      enum ctattr_tuple type, u_int8_t l3num)
+		      enum ctattr_type type, u_int8_t l3num)
 {
 	struct nlattr *tb[CTA_TUPLE_MAX+1];
 	int err;
@@ -924,7 +968,7 @@
 	u16 zone;
 	int err;
 
-	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP)
+	if (nlh->nlmsg_flags & NLM_F_DUMP)
 		return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
 					  ctnetlink_done);
 
@@ -1357,6 +1401,7 @@
 	}
 
 	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
+	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
 	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
 	/* we must add conntrack extensions before confirmation. */
 	ct->status |= IPS_CONFIRMED;
@@ -1375,6 +1420,7 @@
 	}
 #endif
 
+	memset(&ct->proto, 0, sizeof(ct->proto));
 	if (cda[CTA_PROTOINFO]) {
 		err = ctnetlink_change_protoinfo(ct, cda);
 		if (err < 0)
@@ -1787,7 +1833,7 @@
 	u16 zone;
 	int err;
 
-	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 		return netlink_dump_start(ctnl, skb, nlh,
 					  ctnetlink_exp_dump_table,
 					  ctnetlink_exp_done);
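
ctnetlink_timestamp_size() above reserves room for one empty nested attribute plus two 64-bit attributes. Each netlink attribute costs its 4-byte header plus the payload, rounded up to 4-byte alignment; a sketch of that accounting, with the constants restated locally rather than pulled from the uapi headers:

#include <stdio.h>
#include <stdint.h>

#define NLA_ALIGNTO 4u
#define NLA_HDRLEN  4u  /* struct nlattr: 16-bit length + 16-bit type */

static unsigned int nla_align(unsigned int len)
{
	return (len + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1);
}

/* Mirrors nla_total_size(): aligned header-plus-payload footprint. */
static unsigned int total_size(unsigned int payload)
{
	return nla_align(NLA_HDRLEN + payload);
}

int main(void)
{
	/* nest header + CTA_TIMESTAMP_START + CTA_TIMESTAMP_STOP */
	unsigned int sz = total_size(0) + 2 * total_size(sizeof(uint64_t));

	printf("timestamp attributes need %u bytes\n", sz);  /* 4 + 2 * 12 = 28 */
	return 0;
}
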
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index dc7bb74..5701c8d 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -166,6 +166,7 @@
 int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
 {
 	int ret = 0;
+	struct nf_conntrack_l3proto *old;
 
 	if (proto->l3proto >= AF_MAX)
 		return -EBUSY;
@@ -174,7 +175,9 @@
 		return -EINVAL;
 
 	mutex_lock(&nf_ct_proto_mutex);
-	if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_l3proto_generic) {
+	old = rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
+					lockdep_is_held(&nf_ct_proto_mutex));
+	if (old != &nf_conntrack_l3proto_generic) {
 		ret = -EBUSY;
 		goto out_unlock;
 	}
@@ -201,7 +204,9 @@
 	BUG_ON(proto->l3proto >= AF_MAX);
 
 	mutex_lock(&nf_ct_proto_mutex);
-	BUG_ON(nf_ct_l3protos[proto->l3proto] != proto);
+	BUG_ON(rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
+					 lockdep_is_held(&nf_ct_proto_mutex)
+					 ) != proto);
 	rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
 			   &nf_conntrack_l3proto_generic);
 	nf_ct_l3proto_unregister_sysctl(proto);
@@ -279,7 +284,7 @@
 	mutex_lock(&nf_ct_proto_mutex);
 	if (!nf_ct_protos[l4proto->l3proto]) {
 		/* l3proto may be loaded latter. */
-		struct nf_conntrack_l4proto **proto_array;
+		struct nf_conntrack_l4proto __rcu **proto_array;
 		int i;
 
 		proto_array = kmalloc(MAX_NF_CT_PROTO *
@@ -291,7 +296,7 @@
 		}
 
 		for (i = 0; i < MAX_NF_CT_PROTO; i++)
-			proto_array[i] = &nf_conntrack_l4proto_generic;
+			RCU_INIT_POINTER(proto_array[i], &nf_conntrack_l4proto_generic);
 
 		/* Before making proto_array visible to lockless readers,
 		 * we must make sure its content is committed to memory.
@@ -299,8 +304,10 @@
 		smp_wmb();
 
 		nf_ct_protos[l4proto->l3proto] = proto_array;
-	} else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] !=
-					&nf_conntrack_l4proto_generic) {
+	} else if (rcu_dereference_protected(
+			nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+			lockdep_is_held(&nf_ct_proto_mutex)
+			) != &nf_conntrack_l4proto_generic) {
 		ret = -EBUSY;
 		goto out_unlock;
 	}
@@ -331,7 +338,10 @@
 	BUG_ON(l4proto->l3proto >= PF_MAX);
 
 	mutex_lock(&nf_ct_proto_mutex);
-	BUG_ON(nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != l4proto);
+	BUG_ON(rcu_dereference_protected(
+			nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
+			lockdep_is_held(&nf_ct_proto_mutex)
+			) != l4proto);
 	rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
 			   &nf_conntrack_l4proto_generic);
 	nf_ct_l4proto_unregister_sysctl(l4proto);
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 5292560..9ae57c5 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -452,6 +452,9 @@
 	ct->proto.dccp.role[IP_CT_DIR_ORIGINAL] = CT_DCCP_ROLE_CLIENT;
 	ct->proto.dccp.role[IP_CT_DIR_REPLY] = CT_DCCP_ROLE_SERVER;
 	ct->proto.dccp.state = CT_DCCP_NONE;
+	ct->proto.dccp.last_pkt = DCCP_PKT_REQUEST;
+	ct->proto.dccp.last_dir = IP_CT_DIR_ORIGINAL;
+	ct->proto.dccp.handshake_seq = 0;
 	return true;
 
 out_invalid:
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index c6049c2..6f4ee70 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -413,6 +413,7 @@
 	    test_bit(SCTP_CID_COOKIE_ACK, map))
 		return false;
 
+	memset(&ct->proto.sctp, 0, sizeof(ct->proto.sctp));
 	new_state = SCTP_CONNTRACK_MAX;
 	for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
 		/* Don't need lock here: this conntrack not in circulation yet */
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 3fb2b73..6f38d0e 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1066,9 +1066,7 @@
 	BUG_ON(th == NULL);
 
 	/* Don't need lock here: this conntrack not in circulation yet */
-	new_state
-		= tcp_conntracks[0][get_conntrack_index(th)]
-		[TCP_CONNTRACK_NONE];
+	new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
 
 	/* Invalid: delete conntrack */
 	if (new_state >= TCP_CONNTRACK_MAX) {
@@ -1077,6 +1075,7 @@
 	}
 
 	if (new_state == TCP_CONNTRACK_SYN_SENT) {
+		memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
 		/* SYN packet */
 		ct->proto.tcp.seen[0].td_end =
 			segment_seq_plus_len(ntohl(th->seq), skb->len,
@@ -1088,11 +1087,11 @@
 			ct->proto.tcp.seen[0].td_end;
 
 		tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
-		ct->proto.tcp.seen[1].flags = 0;
 	} else if (nf_ct_tcp_loose == 0) {
 		/* Don't try to pick up connections. */
 		return false;
 	} else {
+		memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
 		/*
 		 * We are in the middle of a connection,
 		 * its history is lost for us.
@@ -1107,7 +1106,6 @@
 		ct->proto.tcp.seen[0].td_maxend =
 			ct->proto.tcp.seen[0].td_end +
 			ct->proto.tcp.seen[0].td_maxwin;
-		ct->proto.tcp.seen[0].td_scale = 0;
 
 		/* We assume SACK and liberal window checking to handle
 		 * window scaling */
@@ -1116,13 +1114,7 @@
 					      IP_CT_TCP_FLAG_BE_LIBERAL;
 	}
 
-	ct->proto.tcp.seen[1].td_end = 0;
-	ct->proto.tcp.seen[1].td_maxend = 0;
-	ct->proto.tcp.seen[1].td_maxwin = 0;
-	ct->proto.tcp.seen[1].td_scale = 0;
-
 	/* tcp_packet will set them */
-	ct->proto.tcp.state = TCP_CONNTRACK_NONE;
 	ct->proto.tcp.last_index = TCP_NONE_SET;
 
 	pr_debug("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
diff --git a/net/netfilter/nf_conntrack_snmp.c b/net/netfilter/nf_conntrack_snmp.c
new file mode 100644
index 0000000..6e545e2
--- /dev/null
+++ b/net/netfilter/nf_conntrack_snmp.c
@@ -0,0 +1,77 @@
+/*
+ *      SNMP service broadcast connection tracking helper
+ *
+ *      (c) 2011 Jiri Olsa <jolsa@redhat.com>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/in.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+
+#define SNMP_PORT	161
+
+MODULE_AUTHOR("Jiri Olsa <jolsa@redhat.com>");
+MODULE_DESCRIPTION("SNMP service broadcast connection tracking helper");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NFCT_HELPER("snmp");
+
+static unsigned int timeout __read_mostly = 30;
+module_param(timeout, uint, S_IRUSR);
+MODULE_PARM_DESC(timeout, "timeout for master connection/replies in seconds");
+
+int (*nf_nat_snmp_hook)(struct sk_buff *skb,
+			unsigned int protoff,
+			struct nf_conn *ct,
+			enum ip_conntrack_info ctinfo);
+EXPORT_SYMBOL_GPL(nf_nat_snmp_hook);
+
+static int snmp_conntrack_help(struct sk_buff *skb, unsigned int protoff,
+		struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+	typeof(nf_nat_snmp_hook) nf_nat_snmp;
+
+	nf_conntrack_broadcast_help(skb, protoff, ct, ctinfo, timeout);
+
+	nf_nat_snmp = rcu_dereference(nf_nat_snmp_hook);
+	if (nf_nat_snmp && ct->status & IPS_NAT_MASK)
+		return nf_nat_snmp(skb, protoff, ct, ctinfo);
+
+	return NF_ACCEPT;
+}
+
+static struct nf_conntrack_expect_policy exp_policy = {
+	.max_expected	= 1,
+};
+
+static struct nf_conntrack_helper helper __read_mostly = {
+	.name			= "snmp",
+	.tuple.src.l3num	= NFPROTO_IPV4,
+	.tuple.src.u.udp.port	= cpu_to_be16(SNMP_PORT),
+	.tuple.dst.protonum	= IPPROTO_UDP,
+	.me			= THIS_MODULE,
+	.help			= snmp_conntrack_help,
+	.expect_policy		= &exp_policy,
+};
+
+static int __init nf_conntrack_snmp_init(void)
+{
+	exp_policy.timeout = timeout;
+	return nf_conntrack_helper_register(&helper);
+}
+
+static void __exit nf_conntrack_snmp_fini(void)
+{
+	nf_conntrack_helper_unregister(&helper);
+}
+
+module_init(nf_conntrack_snmp_init);
+module_exit(nf_conntrack_snmp_fini);
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index b4d7f0f..0ae1428 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -29,6 +29,8 @@
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_acct.h>
 #include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <linux/rculist_nulls.h>
 
 MODULE_LICENSE("GPL");
 
@@ -45,6 +47,7 @@
 struct ct_iter_state {
 	struct seq_net_private p;
 	unsigned int bucket;
+	u_int64_t time_now;
 };
 
 static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
@@ -56,7 +59,7 @@
 	for (st->bucket = 0;
 	     st->bucket < net->ct.htable_size;
 	     st->bucket++) {
-		n = rcu_dereference(net->ct.hash[st->bucket].first);
+		n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
 		if (!is_a_nulls(n))
 			return n;
 	}
@@ -69,13 +72,15 @@
 	struct net *net = seq_file_net(seq);
 	struct ct_iter_state *st = seq->private;
 
-	head = rcu_dereference(head->next);
+	head = rcu_dereference(hlist_nulls_next_rcu(head));
 	while (is_a_nulls(head)) {
 		if (likely(get_nulls_value(head) == st->bucket)) {
 			if (++st->bucket >= net->ct.htable_size)
 				return NULL;
 		}
-		head = rcu_dereference(net->ct.hash[st->bucket].first);
+		head = rcu_dereference(
+				hlist_nulls_first_rcu(
+					&net->ct.hash[st->bucket]));
 	}
 	return head;
 }
@@ -93,6 +98,9 @@
 static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
 	__acquires(RCU)
 {
+	struct ct_iter_state *st = seq->private;
+
+	st->time_now = ktime_to_ns(ktime_get_real());
 	rcu_read_lock();
 	return ct_get_idx(seq, *pos);
 }
@@ -132,6 +140,34 @@
 }
 #endif
 
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+static int ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
+{
+	struct ct_iter_state *st = s->private;
+	struct nf_conn_tstamp *tstamp;
+	s64 delta_time;
+
+	tstamp = nf_conn_tstamp_find(ct);
+	if (tstamp) {
+		delta_time = st->time_now - tstamp->start;
+		if (delta_time > 0)
+			delta_time = div_s64(delta_time, NSEC_PER_SEC);
+		else
+			delta_time = 0;
+
+		return seq_printf(s, "delta-time=%llu ",
+				  (unsigned long long)delta_time);
+	}
+	return 0;
+}
+#else
+static inline int
+ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct)
+{
+	return 0;
+}
+#endif
+
 /* return 0 on success, 1 in case of error */
 static int ct_seq_show(struct seq_file *s, void *v)
 {
@@ -200,6 +236,9 @@
 		goto release;
 #endif
 
+	if (ct_show_delta_time(s, ct))
+		goto release;
+
 	if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
 		goto release;
 
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
new file mode 100644
index 0000000..af7dd31
--- /dev/null
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -0,0 +1,120 @@
+/*
+ * (C) 2010 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation (or any later at your option).
+ */
+
+#include <linux/netfilter.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_timestamp.h>
+
+static int nf_ct_tstamp __read_mostly;
+
+module_param_named(tstamp, nf_ct_tstamp, bool, 0644);
+MODULE_PARM_DESC(tstamp, "Enable connection tracking flow timestamping.");
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table tstamp_sysctl_table[] = {
+	{
+		.procname	= "nf_conntrack_timestamp",
+		.data		= &init_net.ct.sysctl_tstamp,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{}
+};
+#endif /* CONFIG_SYSCTL */
+
+static struct nf_ct_ext_type tstamp_extend __read_mostly = {
+	.len	= sizeof(struct nf_conn_tstamp),
+	.align	= __alignof__(struct nf_conn_tstamp),
+	.id	= NF_CT_EXT_TSTAMP,
+};
+
+#ifdef CONFIG_SYSCTL
+static int nf_conntrack_tstamp_init_sysctl(struct net *net)
+{
+	struct ctl_table *table;
+
+	table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
+			GFP_KERNEL);
+	if (!table)
+		goto out;
+
+	table[0].data = &net->ct.sysctl_tstamp;
+
+	net->ct.tstamp_sysctl_header = register_net_sysctl_table(net,
+			nf_net_netfilter_sysctl_path, table);
+	if (!net->ct.tstamp_sysctl_header) {
+		printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n");
+		goto out_register;
+	}
+	return 0;
+
+out_register:
+	kfree(table);
+out:
+	return -ENOMEM;
+}
+
+static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
+{
+	struct ctl_table *table;
+
+	table = net->ct.tstamp_sysctl_header->ctl_table_arg;
+	unregister_net_sysctl_table(net->ct.tstamp_sysctl_header);
+	kfree(table);
+}
+#else
+static int nf_conntrack_tstamp_init_sysctl(struct net *net)
+{
+	return 0;
+}
+
+static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
+{
+}
+#endif
+
+int nf_conntrack_tstamp_init(struct net *net)
+{
+	int ret;
+
+	net->ct.sysctl_tstamp = nf_ct_tstamp;
+
+	if (net_eq(net, &init_net)) {
+		ret = nf_ct_extend_register(&tstamp_extend);
+		if (ret < 0) {
+			printk(KERN_ERR "nf_ct_tstamp: Unable to register "
+					"extension\n");
+			goto out_extend_register;
+		}
+	}
+
+	ret = nf_conntrack_tstamp_init_sysctl(net);
+	if (ret < 0)
+		goto out_sysctl;
+
+	return 0;
+
+out_sysctl:
+	if (net_eq(net, &init_net))
+		nf_ct_extend_unregister(&tstamp_extend);
+out_extend_register:
+	return ret;
+}
+
+void nf_conntrack_tstamp_fini(struct net *net)
+{
+	nf_conntrack_tstamp_fini_sysctl(net);
+	if (net_eq(net, &init_net))
+		nf_ct_extend_unregister(&tstamp_extend);
+}
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index b07393e..20c775c 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -161,7 +161,8 @@
 	struct nf_logger *t;
 	int ret;
 
-	logger = nf_loggers[*pos];
+	logger = rcu_dereference_protected(nf_loggers[*pos],
+					   lockdep_is_held(&nf_log_mutex));
 
 	if (!logger)
 		ret = seq_printf(s, "%2lld NONE (", *pos);
@@ -249,7 +250,8 @@
 		mutex_unlock(&nf_log_mutex);
 	} else {
 		mutex_lock(&nf_log_mutex);
-		logger = nf_loggers[tindex];
+		logger = rcu_dereference_protected(nf_loggers[tindex],
+						   lockdep_is_held(&nf_log_mutex));
 		if (!logger)
 			table->data = "NONE";
 		else
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 74aebed..5ab22e2 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -27,14 +27,17 @@
 int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
 {
 	int ret;
+	const struct nf_queue_handler *old;
 
 	if (pf >= ARRAY_SIZE(queue_handler))
 		return -EINVAL;
 
 	mutex_lock(&queue_handler_mutex);
-	if (queue_handler[pf] == qh)
+	old = rcu_dereference_protected(queue_handler[pf],
+					lockdep_is_held(&queue_handler_mutex));
+	if (old == qh)
 		ret = -EEXIST;
-	else if (queue_handler[pf])
+	else if (old)
 		ret = -EBUSY;
 	else {
 		rcu_assign_pointer(queue_handler[pf], qh);
@@ -49,11 +52,15 @@
 /* The caller must flush their queue before this */
 int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
 {
+	const struct nf_queue_handler *old;
+
 	if (pf >= ARRAY_SIZE(queue_handler))
 		return -EINVAL;
 
 	mutex_lock(&queue_handler_mutex);
-	if (queue_handler[pf] && queue_handler[pf] != qh) {
+	old = rcu_dereference_protected(queue_handler[pf],
+					lockdep_is_held(&queue_handler_mutex));
+	if (old && old != qh) {
 		mutex_unlock(&queue_handler_mutex);
 		return -EINVAL;
 	}
@@ -73,7 +80,10 @@
 
 	mutex_lock(&queue_handler_mutex);
 	for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++)  {
-		if (queue_handler[pf] == qh)
+		if (rcu_dereference_protected(
+				queue_handler[pf],
+				lockdep_is_held(&queue_handler_mutex)
+				) == qh)
 			rcu_assign_pointer(queue_handler[pf], NULL);
 	}
 	mutex_unlock(&queue_handler_mutex);
@@ -115,7 +125,7 @@
 		      int (*okfn)(struct sk_buff *),
 		      unsigned int queuenum)
 {
-	int status;
+	int status = -ENOENT;
 	struct nf_queue_entry *entry = NULL;
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct net_device *physindev;
@@ -128,16 +138,20 @@
 	rcu_read_lock();
 
 	qh = rcu_dereference(queue_handler[pf]);
-	if (!qh)
+	if (!qh) {
+		status = -ESRCH;
 		goto err_unlock;
+	}
 
 	afinfo = nf_get_afinfo(pf);
 	if (!afinfo)
 		goto err_unlock;
 
 	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
-	if (!entry)
+	if (!entry) {
+		status = -ENOMEM;
 		goto err_unlock;
+	}
 
 	*entry = (struct nf_queue_entry) {
 		.skb	= skb,
@@ -151,11 +165,9 @@
 
 	/* If it's going away, ignore hook. */
 	if (!try_module_get(entry->elem->owner)) {
-		rcu_read_unlock();
-		kfree(entry);
-		return 0;
+		status = -ECANCELED;
+		goto err_unlock;
 	}
-
 	/* Bump dev refs so they don't vanish while packet is out */
 	if (indev)
 		dev_hold(indev);
@@ -182,14 +194,13 @@
 		goto err;
 	}
 
-	return 1;
+	return 0;
 
 err_unlock:
 	rcu_read_unlock();
 err:
-	kfree_skb(skb);
 	kfree(entry);
-	return 1;
+	return status;
 }
 
 int nf_queue(struct sk_buff *skb,
@@ -201,6 +212,8 @@
 	     unsigned int queuenum)
 {
 	struct sk_buff *segs;
+	int err;
+	unsigned int queued;
 
 	if (!skb_is_gso(skb))
 		return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
@@ -216,20 +229,35 @@
 	}
 
 	segs = skb_gso_segment(skb, 0);
-	kfree_skb(skb);
+	/* Does not use PTR_ERR to limit the number of error codes that can be
+	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to mean
+	 * 'ignore this hook'.
+	 */
 	if (IS_ERR(segs))
-		return 1;
+		return -EINVAL;
 
+	queued = 0;
+	err = 0;
 	do {
 		struct sk_buff *nskb = segs->next;
 
 		segs->next = NULL;
-		if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn,
-				queuenum))
+		if (err == 0)
+			err = __nf_queue(segs, elem, pf, hook, indev,
+					   outdev, okfn, queuenum);
+		if (err == 0)
+			queued++;
+		else
 			kfree_skb(segs);
 		segs = nskb;
 	} while (segs);
-	return 1;
+
+	/* also free orig skb if only some segments were queued */
+	if (unlikely(err && queued))
+		err = 0;
+	if (err == 0)
+		kfree_skb(skb);
+	return err;
 }
 
 void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
@@ -237,6 +265,7 @@
 	struct sk_buff *skb = entry->skb;
 	struct list_head *elem = &entry->elem->list;
 	const struct nf_afinfo *afinfo;
+	int err;
 
 	rcu_read_lock();
 
@@ -270,10 +299,17 @@
 		local_bh_enable();
 		break;
 	case NF_QUEUE:
-		if (!__nf_queue(skb, elem, entry->pf, entry->hook,
-				entry->indev, entry->outdev, entry->okfn,
-				verdict >> NF_VERDICT_BITS))
-			goto next_hook;
+		err = __nf_queue(skb, elem, entry->pf, entry->hook,
+				 entry->indev, entry->outdev, entry->okfn,
+				 verdict >> NF_VERDICT_QBITS);
+		if (err < 0) {
+			if (err == -ECANCELED)
+				goto next_hook;
+			if (err == -ESRCH &&
+			   (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
+				goto next_hook;
+			kfree_skb(skb);
+		}
 		break;
 	case NF_STOLEN:
 	default:
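
With the error propagation above, nf_reinject() recovers the target queue from the verdict word (verdict >> NF_VERDICT_QBITS) and only drops the packet on -ESRCH if the rule did not set NF_VERDICT_FLAG_QUEUE_BYPASS. The sketch below shows the general idea of packing a base verdict, a bypass flag and a queue number into one word; the bit positions and the numeric NF_QUEUE value used here are illustrative assumptions, not copied from the kernel headers:

#include <stdio.h>
#include <stdint.h>

/* Illustrative layout (assumption): low bits = base verdict,
 * bit 15 = "bypass if no queue handler", upper 16 bits = queue number. */
#define EX_VERDICT_MASK  0x000000ffu
#define EX_FLAG_BYPASS   0x00008000u
#define EX_QUEUE_SHIFT   16
#define EX_NF_QUEUE      3u  /* assumed numeric value of the NF_QUEUE verdict */

static uint32_t make_queue_verdict(uint16_t queue, int bypass)
{
	return ((uint32_t)queue << EX_QUEUE_SHIFT) |
	       (bypass ? EX_FLAG_BYPASS : 0) | EX_NF_QUEUE;
}

int main(void)
{
	uint32_t v = make_queue_verdict(7, 1);

	printf("base=%u queue=%u bypass=%u\n",
	       v & EX_VERDICT_MASK, v >> EX_QUEUE_SHIFT, !!(v & EX_FLAG_BYPASS));
	return 0;
}
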
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
index 4d87bef..474d621 100644
--- a/net/netfilter/nf_tproxy_core.c
+++ b/net/netfilter/nf_tproxy_core.c
@@ -28,26 +28,23 @@
 	skb->destructor = NULL;
 
 	if (sk)
-		nf_tproxy_put_sock(sk);
+		sock_put(sk);
 }
 
 /* consumes sk */
-int
+void
 nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
 {
-	bool transparent = (sk->sk_state == TCP_TIME_WAIT) ?
-				inet_twsk(sk)->tw_transparent :
-				inet_sk(sk)->transparent;
+	/* assigning tw sockets complicates things; most
+	 * skb->sk->X checks would have to test sk->sk_state first */
+	if (sk->sk_state == TCP_TIME_WAIT) {
+		inet_twsk_put(inet_twsk(sk));
+		return;
+	}
 
-	if (transparent) {
-		skb_orphan(skb);
-		skb->sk = sk;
-		skb->destructor = nf_tproxy_destructor;
-		return 1;
-	} else
-		nf_tproxy_put_sock(sk);
-
-	return 0;
+	skb_orphan(skb);
+	skb->sk = sk;
+	skb->destructor = nf_tproxy_destructor;
 }
 EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock);
 
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 6a1572b..91592da 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -874,19 +874,19 @@
 
 	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
 		if (!hlist_empty(&instance_table[st->bucket]))
-			return rcu_dereference_bh(instance_table[st->bucket].first);
+			return rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
 	}
 	return NULL;
 }
 
 static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
 {
-	h = rcu_dereference_bh(h->next);
+	h = rcu_dereference_bh(hlist_next_rcu(h));
 	while (!h) {
 		if (++st->bucket >= INSTANCE_BUCKETS)
 			return NULL;
 
-		h = rcu_dereference_bh(instance_table[st->bucket].first);
+		h = rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
 	}
 	return h;
 }
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 68e67d1..b83123f 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -387,25 +387,31 @@
 {
 	struct sk_buff *nskb;
 	struct nfqnl_instance *queue;
-	int err;
+	int err = -ENOBUFS;
 
 	/* rcu_read_lock()ed by nf_hook_slow() */
 	queue = instance_lookup(queuenum);
-	if (!queue)
+	if (!queue) {
+		err = -ESRCH;
 		goto err_out;
+	}
 
-	if (queue->copy_mode == NFQNL_COPY_NONE)
+	if (queue->copy_mode == NFQNL_COPY_NONE) {
+		err = -EINVAL;
 		goto err_out;
+	}
 
 	nskb = nfqnl_build_packet_message(queue, entry);
-	if (nskb == NULL)
+	if (nskb == NULL) {
+		err = -ENOMEM;
 		goto err_out;
-
+	}
 	spin_lock_bh(&queue->lock);
 
-	if (!queue->peer_pid)
+	if (!queue->peer_pid) {
+		err = -EINVAL;
 		goto err_out_free_nskb;
-
+	}
 	if (queue->queue_total >= queue->queue_maxlen) {
 		queue->queue_dropped++;
 		if (net_ratelimit())
@@ -432,7 +438,7 @@
 err_out_unlock:
 	spin_unlock_bh(&queue->lock);
 err_out:
-	return -1;
+	return err;
 }
 
 static int
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index c942376..0a77d2f 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -23,6 +23,7 @@
 #include <linux/mutex.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/audit.h>
 #include <net/net_namespace.h>
 
 #include <linux/netfilter/x_tables.h>
@@ -38,9 +39,8 @@
 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
 
 struct compat_delta {
-	struct compat_delta *next;
-	unsigned int offset;
-	int delta;
+	unsigned int offset; /* offset in kernel */
+	int delta; /* delta in 32bit user land */
 };
 
 struct xt_af {
@@ -49,7 +49,9 @@
 	struct list_head target;
 #ifdef CONFIG_COMPAT
 	struct mutex compat_mutex;
-	struct compat_delta *compat_offsets;
+	struct compat_delta *compat_tab;
+	unsigned int number; /* number of slots in compat_tab[] */
+	unsigned int cur; /* number of used slots in compat_tab[] */
 #endif
 };
 
@@ -414,54 +416,67 @@
 EXPORT_SYMBOL_GPL(xt_check_match);
 
 #ifdef CONFIG_COMPAT
-int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta)
+int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
 {
-	struct compat_delta *tmp;
+	struct xt_af *xp = &xt[af];
 
-	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
-	if (!tmp)
-		return -ENOMEM;
-
-	tmp->offset = offset;
-	tmp->delta = delta;
-
-	if (xt[af].compat_offsets) {
-		tmp->next = xt[af].compat_offsets->next;
-		xt[af].compat_offsets->next = tmp;
-	} else {
-		xt[af].compat_offsets = tmp;
-		tmp->next = NULL;
+	if (!xp->compat_tab) {
+		if (!xp->number)
+			return -EINVAL;
+		xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
+		if (!xp->compat_tab)
+			return -ENOMEM;
+		xp->cur = 0;
 	}
+
+	if (xp->cur >= xp->number)
+		return -EINVAL;
+
+	if (xp->cur)
+		delta += xp->compat_tab[xp->cur - 1].delta;
+	xp->compat_tab[xp->cur].offset = offset;
+	xp->compat_tab[xp->cur].delta = delta;
+	xp->cur++;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xt_compat_add_offset);
 
 void xt_compat_flush_offsets(u_int8_t af)
 {
-	struct compat_delta *tmp, *next;
-
-	if (xt[af].compat_offsets) {
-		for (tmp = xt[af].compat_offsets; tmp; tmp = next) {
-			next = tmp->next;
-			kfree(tmp);
-		}
-		xt[af].compat_offsets = NULL;
+	if (xt[af].compat_tab) {
+		vfree(xt[af].compat_tab);
+		xt[af].compat_tab = NULL;
+		xt[af].number = 0;
 	}
 }
 EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
 
 int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
 {
-	struct compat_delta *tmp;
-	int delta;
+	struct compat_delta *tmp = xt[af].compat_tab;
+	int mid, left = 0, right = xt[af].cur - 1;
 
-	for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
-		if (tmp->offset < offset)
-			delta += tmp->delta;
-	return delta;
+	while (left <= right) {
+		mid = (left + right) >> 1;
+		if (offset > tmp[mid].offset)
+			left = mid + 1;
+		else if (offset < tmp[mid].offset)
+			right = mid - 1;
+		else
+			return mid ? tmp[mid - 1].delta : 0;
+	}
+	WARN_ON_ONCE(1);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
 
+void xt_compat_init_offsets(u_int8_t af, unsigned int number)
+{
+	xt[af].number = number;
+	xt[af].cur = 0;
+}
+EXPORT_SYMBOL(xt_compat_init_offsets);
+
 int xt_compat_match_offset(const struct xt_match *match)
 {
 	u_int16_t csize = match->compatsize ? : match->matchsize;
@@ -820,6 +835,21 @@
 	 */
 	local_bh_enable();
 
+#ifdef CONFIG_AUDIT
+	if (audit_enabled) {
+		struct audit_buffer *ab;
+
+		ab = audit_log_start(current->audit_context, GFP_KERNEL,
+				     AUDIT_NETFILTER_CFG);
+		if (ab) {
+			audit_log_format(ab, "table=%s family=%u entries=%u",
+					 table->name, table->af,
+					 private->number);
+			audit_log_end(ab);
+		}
+	}
+#endif
+
 	return private;
 }
 EXPORT_SYMBOL_GPL(xt_replace_table);
@@ -1338,7 +1368,7 @@
 		mutex_init(&xt[i].mutex);
 #ifdef CONFIG_COMPAT
 		mutex_init(&xt[i].compat_mutex);
-		xt[i].compat_offsets = NULL;
+		xt[i].compat_tab = NULL;
 #endif
 		INIT_LIST_HEAD(&xt[i].target);
 		INIT_LIST_HEAD(&xt[i].match);
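
xt_compat_calc_jump() above now keeps cumulative deltas in a flat array sorted by offset and binary-searches it, instead of walking a linked list and summing on every call. A self-contained sketch of the same lookup (all names are local to this example):

#include <stdio.h>

struct delta_entry {
	unsigned int offset;  /* offset in the kernel-side blob, ascending */
	int delta;            /* running 32-bit-userland delta up to and including this entry */
};

/* As in the hunk above: the delta that applies at a given offset is the
 * running total stored in the previous entry (0 for the first entry). */
static int calc_jump(const struct delta_entry *tab, unsigned int n, unsigned int offset)
{
	int left = 0, right = (int)n - 1;

	while (left <= right) {
		int mid = (left + right) / 2;

		if (offset > tab[mid].offset)
			left = mid + 1;
		else if (offset < tab[mid].offset)
			right = mid - 1;
		else
			return mid ? tab[mid - 1].delta : 0;
	}
	return 0;  /* not found; the kernel version warns once here */
}

int main(void)
{
	const struct delta_entry tab[] = { { 0, 8 }, { 112, 16 }, { 240, 24 } };

	printf("%d %d %d\n",
	       calc_jump(tab, 3, 0),     /* 0  */
	       calc_jump(tab, 3, 112),   /* 8  */
	       calc_jump(tab, 3, 240));  /* 16 */
	return 0;
}
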
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
new file mode 100644
index 0000000..81802d2
--- /dev/null
+++ b/net/netfilter/xt_AUDIT.c
@@ -0,0 +1,204 @@
+/*
+ * Creates audit record for dropped/accepted packets
+ *
+ * (C) 2010-2011 Thomas Graf <tgraf@redhat.com>
+ * (C) 2010-2011 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/audit.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_AUDIT.h>
+#include <net/ipv6.h>
+#include <net/ip.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Graf <tgraf@redhat.com>");
+MODULE_DESCRIPTION("Xtables: creates audit records for dropped/accepted packets");
+MODULE_ALIAS("ipt_AUDIT");
+MODULE_ALIAS("ip6t_AUDIT");
+MODULE_ALIAS("ebt_AUDIT");
+MODULE_ALIAS("arpt_AUDIT");
+
+static void audit_proto(struct audit_buffer *ab, struct sk_buff *skb,
+			unsigned int proto, unsigned int offset)
+{
+	switch (proto) {
+	case IPPROTO_TCP:
+	case IPPROTO_UDP:
+	case IPPROTO_UDPLITE: {
+		const __be16 *pptr;
+		__be16 _ports[2];
+
+		pptr = skb_header_pointer(skb, offset, sizeof(_ports), _ports);
+		if (pptr == NULL) {
+			audit_log_format(ab, " truncated=1");
+			return;
+		}
+
+		audit_log_format(ab, " sport=%hu dport=%hu",
+				 ntohs(pptr[0]), ntohs(pptr[1]));
+		}
+		break;
+
+	case IPPROTO_ICMP:
+	case IPPROTO_ICMPV6: {
+		const u8 *iptr;
+		u8 _ih[2];
+
+		iptr = skb_header_pointer(skb, offset, sizeof(_ih), &_ih);
+		if (iptr == NULL) {
+			audit_log_format(ab, " truncated=1");
+			return;
+		}
+
+		audit_log_format(ab, " icmptype=%hhu icmpcode=%hhu",
+				 iptr[0], iptr[1]);
+
+		}
+		break;
+	}
+}
+
+static void audit_ip4(struct audit_buffer *ab, struct sk_buff *skb)
+{
+	struct iphdr _iph;
+	const struct iphdr *ih;
+
+	ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
+	if (!ih) {
+		audit_log_format(ab, " truncated=1");
+		return;
+	}
+
+	audit_log_format(ab, " saddr=%pI4 daddr=%pI4 ipid=%hu proto=%hhu",
+		&ih->saddr, &ih->daddr, ntohs(ih->id), ih->protocol);
+
+	if (ntohs(ih->frag_off) & IP_OFFSET) {
+		audit_log_format(ab, " frag=1");
+		return;
+	}
+
+	audit_proto(ab, skb, ih->protocol, ih->ihl * 4);
+}
+
+static void audit_ip6(struct audit_buffer *ab, struct sk_buff *skb)
+{
+	struct ipv6hdr _ip6h;
+	const struct ipv6hdr *ih;
+	u8 nexthdr;
+	int offset;
+
+	ih = skb_header_pointer(skb, skb_network_offset(skb), sizeof(_ip6h), &_ip6h);
+	if (!ih) {
+		audit_log_format(ab, " truncated=1");
+		return;
+	}
+
+	nexthdr = ih->nexthdr;
+	offset = ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(_ip6h),
+				  &nexthdr);
+
+	audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu",
+			 &ih->saddr, &ih->daddr, nexthdr);
+
+	if (offset)
+		audit_proto(ab, skb, nexthdr, offset);
+}
+
+static unsigned int
+audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_audit_info *info = par->targinfo;
+	struct audit_buffer *ab;
+
+	ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);
+	if (ab == NULL)
+		goto errout;
+
+	audit_log_format(ab, "action=%hhu hook=%u len=%u inif=%s outif=%s",
+			 info->type, par->hooknum, skb->len,
+			 par->in ? par->in->name : "?",
+			 par->out ? par->out->name : "?");
+
+	if (skb->mark)
+		audit_log_format(ab, " mark=%#x", skb->mark);
+
+	if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
+		audit_log_format(ab, " smac=%pM dmac=%pM macproto=0x%04x",
+				 eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+				 ntohs(eth_hdr(skb)->h_proto));
+
+		if (par->family == NFPROTO_BRIDGE) {
+			switch (eth_hdr(skb)->h_proto) {
+			case __constant_htons(ETH_P_IP):
+				audit_ip4(ab, skb);
+				break;
+
+			case __constant_htons(ETH_P_IPV6):
+				audit_ip6(ab, skb);
+				break;
+			}
+		}
+	}
+
+	switch (par->family) {
+	case NFPROTO_IPV4:
+		audit_ip4(ab, skb);
+		break;
+
+	case NFPROTO_IPV6:
+		audit_ip6(ab, skb);
+		break;
+	}
+
+	audit_log_end(ab);
+
+errout:
+	return XT_CONTINUE;
+}
+
+static int audit_tg_check(const struct xt_tgchk_param *par)
+{
+	const struct xt_audit_info *info = par->targinfo;
+
+	if (info->type > XT_AUDIT_TYPE_MAX) {
+		pr_info("Audit type out of range (valid range: 0..%hhu)\n",
+			XT_AUDIT_TYPE_MAX);
+		return -ERANGE;
+	}
+
+	return 0;
+}
+
+static struct xt_target audit_tg_reg __read_mostly = {
+	.name		= "AUDIT",
+	.family		= NFPROTO_UNSPEC,
+	.target		= audit_tg,
+	.targetsize	= sizeof(struct xt_audit_info),
+	.checkentry	= audit_tg_check,
+	.me		= THIS_MODULE,
+};
+
+static int __init audit_tg_init(void)
+{
+	return xt_register_target(&audit_tg_reg);
+}
+
+static void __exit audit_tg_exit(void)
+{
+	xt_unregister_target(&audit_tg_reg);
+}
+
+module_init(audit_tg_init);
+module_exit(audit_tg_exit);
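
The audit target above never reads packet headers in place: audit_proto(), audit_ip4() and audit_ip6() copy the few bytes they need through skb_header_pointer() and log "truncated=1" when the packet is shorter than the requested header. A userspace sketch of that defensive pattern, with a hypothetical stand-in for skb_header_pointer():

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Hypothetical stand-in: copy len bytes at offset into buf, or return
 * NULL if the packet is too short to contain them. */
static const void *header_pointer(const uint8_t *pkt, size_t pkt_len,
				  size_t offset, size_t len, void *buf)
{
	if (offset + len > pkt_len)
		return NULL;
	memcpy(buf, pkt + offset, len);
	return buf;
}

static void log_ports(const uint8_t *pkt, size_t pkt_len, size_t l4_offset)
{
	uint16_t ports[2];

	if (!header_pointer(pkt, pkt_len, l4_offset, sizeof(ports), ports)) {
		printf(" truncated=1\n");
		return;
	}
	printf(" sport=%u dport=%u\n", ntohs(ports[0]), ntohs(ports[1]));
}

int main(void)
{
	/* 20-byte IPv4 header followed by the first 4 bytes of a UDP header */
	uint8_t pkt[24] = { 0 };

	pkt[20] = 0x00; pkt[21] = 0xa1;  /* source port 161 */
	pkt[22] = 0x04; pkt[23] = 0xd2;  /* destination port 1234 */

	log_ports(pkt, sizeof(pkt), 20);  /* sport=161 dport=1234 */
	log_ports(pkt, 22, 20);           /* truncated=1 */
	return 0;
}
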
diff --git a/net/netfilter/xt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c
index c2c0e4a..af9c4da 100644
--- a/net/netfilter/xt_CLASSIFY.c
+++ b/net/netfilter/xt_CLASSIFY.c
@@ -19,12 +19,14 @@
 #include <linux/netfilter_ipv6.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_CLASSIFY.h>
+#include <linux/netfilter_arp.h>
 
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Xtables: Qdisc classification");
 MODULE_ALIAS("ipt_CLASSIFY");
 MODULE_ALIAS("ip6t_CLASSIFY");
+MODULE_ALIAS("arpt_CLASSIFY");
 
 static unsigned int
 classify_tg(struct sk_buff *skb, const struct xt_action_param *par)
@@ -35,26 +37,36 @@
 	return XT_CONTINUE;
 }
 
-static struct xt_target classify_tg_reg __read_mostly = {
-	.name       = "CLASSIFY",
-	.revision   = 0,
-	.family     = NFPROTO_UNSPEC,
-	.table      = "mangle",
-	.hooks      = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
-		      (1 << NF_INET_POST_ROUTING),
-	.target     = classify_tg,
-	.targetsize = sizeof(struct xt_classify_target_info),
-	.me         = THIS_MODULE,
+static struct xt_target classify_tg_reg[] __read_mostly = {
+	{
+		.name       = "CLASSIFY",
+		.revision   = 0,
+		.family     = NFPROTO_UNSPEC,
+		.hooks      = (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_FORWARD) |
+		              (1 << NF_INET_POST_ROUTING),
+		.target     = classify_tg,
+		.targetsize = sizeof(struct xt_classify_target_info),
+		.me         = THIS_MODULE,
+	},
+	{
+		.name       = "CLASSIFY",
+		.revision   = 0,
+		.family     = NFPROTO_ARP,
+		.hooks      = (1 << NF_ARP_OUT) | (1 << NF_ARP_FORWARD),
+		.target     = classify_tg,
+		.targetsize = sizeof(struct xt_classify_target_info),
+		.me         = THIS_MODULE,
+	},
 };
 
 static int __init classify_tg_init(void)
 {
-	return xt_register_target(&classify_tg_reg);
+	return xt_register_targets(classify_tg_reg, ARRAY_SIZE(classify_tg_reg));
 }
 
 static void __exit classify_tg_exit(void)
 {
-	xt_unregister_target(&classify_tg_reg);
+	xt_unregister_targets(classify_tg_reg, ARRAY_SIZE(classify_tg_reg));
 }
 
 module_init(classify_tg_init);
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index be1f22e..3bdd443 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -313,3 +313,5 @@
 MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
 MODULE_DESCRIPTION("Xtables: idle time monitor");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ipt_IDLETIMER");
+MODULE_ALIAS("ip6t_IDLETIMER");
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index a414050..993de2b 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -31,6 +31,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Adam Nielsen <a.nielsen@shikadi.net>");
 MODULE_DESCRIPTION("Xtables: trigger LED devices on packet match");
+MODULE_ALIAS("ipt_LED");
+MODULE_ALIAS("ip6t_LED");
 
 static LIST_HEAD(xt_led_triggers);
 static DEFINE_MUTEX(xt_led_mutex);
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 039cce1..d4f4b5d 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -72,18 +72,31 @@
 
 	if (info->queues_total > 1) {
 		if (par->family == NFPROTO_IPV4)
-			queue = hash_v4(skb) % info->queues_total + queue;
+			queue = (((u64) hash_v4(skb) * info->queues_total) >>
+				 32) + queue;
 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
 		else if (par->family == NFPROTO_IPV6)
-			queue = hash_v6(skb) % info->queues_total + queue;
+			queue = (((u64) hash_v6(skb) * info->queues_total) >>
+				 32) + queue;
 #endif
 	}
 	return NF_QUEUE_NR(queue);
 }
 
-static int nfqueue_tg_v1_check(const struct xt_tgchk_param *par)
+static unsigned int
+nfqueue_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
 {
-	const struct xt_NFQ_info_v1 *info = par->targinfo;
+	const struct xt_NFQ_info_v2 *info = par->targinfo;
+	unsigned int ret = nfqueue_tg_v1(skb, par);
+
+	if (info->bypass)
+		ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
+	return ret;
+}
+
+static int nfqueue_tg_check(const struct xt_tgchk_param *par)
+{
+	const struct xt_NFQ_info_v2 *info = par->targinfo;
 	u32 maxid;
 
 	if (unlikely(!rnd_inited)) {
@@ -100,6 +113,8 @@
 		       info->queues_total, maxid);
 		return -ERANGE;
 	}
+	if (par->target->revision == 2 && info->bypass > 1)
+		return -EINVAL;
 	return 0;
 }
 
@@ -115,11 +130,20 @@
 		.name		= "NFQUEUE",
 		.revision	= 1,
 		.family		= NFPROTO_UNSPEC,
-		.checkentry	= nfqueue_tg_v1_check,
+		.checkentry	= nfqueue_tg_check,
 		.target		= nfqueue_tg_v1,
 		.targetsize	= sizeof(struct xt_NFQ_info_v1),
 		.me		= THIS_MODULE,
 	},
+	{
+		.name		= "NFQUEUE",
+		.revision	= 2,
+		.family		= NFPROTO_UNSPEC,
+		.checkentry	= nfqueue_tg_check,
+		.target		= nfqueue_tg_v2,
+		.targetsize	= sizeof(struct xt_NFQ_info_v2),
+		.me		= THIS_MODULE,
+	},
 };
 
 static int __init nfqueue_tg_init(void)
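The NFQUEUE change above replaces the modulo-based queue spreading with a multiply-and-shift: for a 32-bit hash h and n configured queues, ((u64)h * n) >> 32 always falls in [0, n) and needs no division. A minimal illustrative sketch follows; the helper name is made up and this is not part of the patch:

	#include <stdint.h>

	/* Illustrative only: maps a 32-bit hash onto [0, n) the same way
	 * the new nfqueue_tg_v1() code does.  Since h < 2^32, the product
	 * h * n is < n * 2^32, so shifting right by 32 yields a value < n. */
	static inline unsigned int hash_to_queue(uint32_t h, uint16_t n)
	{
		return (unsigned int)(((uint64_t)h * n) >> 32);
	}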
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 640678f..dcfd57e 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -33,6 +33,20 @@
 #include <net/netfilter/nf_tproxy_core.h>
 #include <linux/netfilter/xt_TPROXY.h>
 
+static bool tproxy_sk_is_transparent(struct sock *sk)
+{
+	if (sk->sk_state != TCP_TIME_WAIT) {
+		if (inet_sk(sk)->transparent)
+			return true;
+		sock_put(sk);
+	} else {
+		if (inet_twsk(sk)->tw_transparent)
+			return true;
+		inet_twsk_put(inet_twsk(sk));
+	}
+	return false;
+}
+
 static inline __be32
 tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
 {
@@ -141,7 +155,7 @@
 					   skb->dev, NFT_LOOKUP_LISTENER);
 
 	/* NOTE: assign_sock consumes our sk reference */
-	if (sk && nf_tproxy_assign_sock(skb, sk)) {
+	if (sk && tproxy_sk_is_transparent(sk)) {
 		/* This should be in a separate target, but we don't do multiple
 		   targets on the same rule yet */
 		skb->mark = (skb->mark & ~mark_mask) ^ mark_value;
@@ -149,6 +163,8 @@
 		pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n",
 			 iph->protocol, &iph->daddr, ntohs(hp->dest),
 			 &laddr, ntohs(lport), skb->mark);
+
+		nf_tproxy_assign_sock(skb, sk);
 		return NF_ACCEPT;
 	}
 
@@ -306,7 +322,7 @@
 					   par->in, NFT_LOOKUP_LISTENER);
 
 	/* NOTE: assign_sock consumes our sk reference */
-	if (sk && nf_tproxy_assign_sock(skb, sk)) {
+	if (sk && tproxy_sk_is_transparent(sk)) {
 		/* This should be in a separate target, but we don't do multiple
 		   targets on the same rule yet */
 		skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value;
@@ -314,6 +330,8 @@
 		pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n",
 			 tproto, &iph->saddr, ntohs(hp->source),
 			 laddr, ntohs(lport), skb->mark);
+
+		nf_tproxy_assign_sock(skb, sk);
 		return NF_ACCEPT;
 	}
 
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 5c5b6b9..e029c48 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -185,18 +185,24 @@
 	int connections;
 
 	ct = nf_ct_get(skb, &ctinfo);
-	if (ct != NULL)
-		tuple_ptr = &ct->tuplehash[0].tuple;
-	else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
-				    par->family, &tuple))
+	if (ct != NULL) {
+		if (info->flags & XT_CONNLIMIT_DADDR)
+			tuple_ptr = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+		else
+			tuple_ptr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+	} else if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
+				    par->family, &tuple)) {
 		goto hotdrop;
+	}
 
 	if (par->family == NFPROTO_IPV6) {
 		const struct ipv6hdr *iph = ipv6_hdr(skb);
-		memcpy(&addr.ip6, &iph->saddr, sizeof(iph->saddr));
+		memcpy(&addr.ip6, (info->flags & XT_CONNLIMIT_DADDR) ?
+		       &iph->daddr : &iph->saddr, sizeof(addr.ip6));
 	} else {
 		const struct iphdr *iph = ip_hdr(skb);
-		addr.ip = iph->saddr;
+		addr.ip = (info->flags & XT_CONNLIMIT_DADDR) ?
+			  iph->daddr : iph->saddr;
 	}
 
 	spin_lock_bh(&info->data->lock);
@@ -204,13 +210,12 @@
 	                         &info->mask, par->family);
 	spin_unlock_bh(&info->data->lock);
 
-	if (connections < 0) {
+	if (connections < 0)
 		/* kmalloc failed, drop it entirely */
-		par->hotdrop = true;
-		return false;
-	}
+		goto hotdrop;
 
-	return (connections > info->limit) ^ info->inverse;
+	return (connections > info->limit) ^
+	       !!(info->flags & XT_CONNLIMIT_INVERT);
 
  hotdrop:
 	par->hotdrop = true;
@@ -268,25 +273,38 @@
 	kfree(info->data);
 }
 
-static struct xt_match connlimit_mt_reg __read_mostly = {
-	.name       = "connlimit",
-	.revision   = 0,
-	.family     = NFPROTO_UNSPEC,
-	.checkentry = connlimit_mt_check,
-	.match      = connlimit_mt,
-	.matchsize  = sizeof(struct xt_connlimit_info),
-	.destroy    = connlimit_mt_destroy,
-	.me         = THIS_MODULE,
+static struct xt_match connlimit_mt_reg[] __read_mostly = {
+	{
+		.name       = "connlimit",
+		.revision   = 0,
+		.family     = NFPROTO_UNSPEC,
+		.checkentry = connlimit_mt_check,
+		.match      = connlimit_mt,
+		.matchsize  = sizeof(struct xt_connlimit_info),
+		.destroy    = connlimit_mt_destroy,
+		.me         = THIS_MODULE,
+	},
+	{
+		.name       = "connlimit",
+		.revision   = 1,
+		.family     = NFPROTO_UNSPEC,
+		.checkentry = connlimit_mt_check,
+		.match      = connlimit_mt,
+		.matchsize  = sizeof(struct xt_connlimit_info),
+		.destroy    = connlimit_mt_destroy,
+		.me         = THIS_MODULE,
+	},
 };
 
 static int __init connlimit_mt_init(void)
 {
-	return xt_register_match(&connlimit_mt_reg);
+	return xt_register_matches(connlimit_mt_reg,
+	       ARRAY_SIZE(connlimit_mt_reg));
 }
 
 static void __exit connlimit_mt_exit(void)
 {
-	xt_unregister_match(&connlimit_mt_reg);
+	xt_unregister_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg));
 }
 
 module_init(connlimit_mt_init);
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index e536710..4ef1b63 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -112,6 +112,54 @@
 	return true;
 }
 
+static inline bool
+port_match(u16 min, u16 max, u16 port, bool invert)
+{
+	return (port >= min && port <= max) ^ invert;
+}
+
+static inline bool
+ct_proto_port_check_v3(const struct xt_conntrack_mtinfo3 *info,
+		       const struct nf_conn *ct)
+{
+	const struct nf_conntrack_tuple *tuple;
+
+	tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+	if ((info->match_flags & XT_CONNTRACK_PROTO) &&
+	    (nf_ct_protonum(ct) == info->l4proto) ^
+	    !(info->invert_flags & XT_CONNTRACK_PROTO))
+		return false;
+
+	/* Shortcut to match all recognized protocols by using ->src.all. */
+	if ((info->match_flags & XT_CONNTRACK_ORIGSRC_PORT) &&
+	    !port_match(info->origsrc_port, info->origsrc_port_high,
+			ntohs(tuple->src.u.all),
+			info->invert_flags & XT_CONNTRACK_ORIGSRC_PORT))
+		return false;
+
+	if ((info->match_flags & XT_CONNTRACK_ORIGDST_PORT) &&
+	    !port_match(info->origdst_port, info->origdst_port_high,
+			ntohs(tuple->dst.u.all),
+			info->invert_flags & XT_CONNTRACK_ORIGDST_PORT))
+		return false;
+
+	tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
+
+	if ((info->match_flags & XT_CONNTRACK_REPLSRC_PORT) &&
+	    !port_match(info->replsrc_port, info->replsrc_port_high,
+			ntohs(tuple->src.u.all),
+			info->invert_flags & XT_CONNTRACK_REPLSRC_PORT))
+		return false;
+
+	if ((info->match_flags & XT_CONNTRACK_REPLDST_PORT) &&
+	    !port_match(info->repldst_port, info->repldst_port_high,
+			ntohs(tuple->dst.u.all),
+			info->invert_flags & XT_CONNTRACK_REPLDST_PORT))
+		return false;
+
+	return true;
+}
+
 static bool
 conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
              u16 state_mask, u16 status_mask)
@@ -170,8 +218,13 @@
 		    !(info->invert_flags & XT_CONNTRACK_REPLDST))
 			return false;
 
-	if (!ct_proto_port_check(info, ct))
-		return false;
+	if (par->match->revision != 3) {
+		if (!ct_proto_port_check(info, ct))
+			return false;
+	} else {
+		if (!ct_proto_port_check_v3(par->matchinfo, ct))
+			return false;
+	}
 
 	if ((info->match_flags & XT_CONNTRACK_STATUS) &&
 	    (!!(status_mask & ct->status) ^
@@ -207,6 +260,14 @@
 	return conntrack_mt(skb, par, info->state_mask, info->status_mask);
 }
 
+static bool
+conntrack_mt_v3(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_conntrack_mtinfo3 *info = par->matchinfo;
+
+	return conntrack_mt(skb, par, info->state_mask, info->status_mask);
+}
+
 static int conntrack_mt_check(const struct xt_mtchk_param *par)
 {
 	int ret;
@@ -244,6 +305,16 @@
 		.destroy    = conntrack_mt_destroy,
 		.me         = THIS_MODULE,
 	},
+	{
+		.name       = "conntrack",
+		.revision   = 3,
+		.family     = NFPROTO_UNSPEC,
+		.matchsize  = sizeof(struct xt_conntrack_mtinfo3),
+		.match      = conntrack_mt_v3,
+		.checkentry = conntrack_mt_check,
+		.destroy    = conntrack_mt_destroy,
+		.me         = THIS_MODULE,
+	},
 };
 
 static int __init conntrack_mt_init(void)
diff --git a/net/netfilter/xt_cpu.c b/net/netfilter/xt_cpu.c
index b39db8a..c7a2e54 100644
--- a/net/netfilter/xt_cpu.c
+++ b/net/netfilter/xt_cpu.c
@@ -22,6 +22,8 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Eric Dumazet <eric.dumazet@gmail.com>");
 MODULE_DESCRIPTION("Xtables: CPU match");
+MODULE_ALIAS("ipt_cpu");
+MODULE_ALIAS("ip6t_cpu");
 
 static int cpu_mt_check(const struct xt_mtchk_param *par)
 {
diff --git a/net/netfilter/xt_devgroup.c b/net/netfilter/xt_devgroup.c
new file mode 100644
index 0000000..d9202cd
--- /dev/null
+++ b/net/netfilter/xt_devgroup.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+#include <linux/netfilter/xt_devgroup.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xtables: Device group match");
+MODULE_ALIAS("ipt_devgroup");
+MODULE_ALIAS("ip6t_devgroup");
+
+static bool devgroup_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_devgroup_info *info = par->matchinfo;
+
+	if (info->flags & XT_DEVGROUP_MATCH_SRC &&
+	    (((info->src_group ^ par->in->group) & info->src_mask ? 1 : 0) ^
+	     ((info->flags & XT_DEVGROUP_INVERT_SRC) ? 1 : 0)))
+		return false;
+
+	if (info->flags & XT_DEVGROUP_MATCH_DST &&
+	    (((info->dst_group ^ par->out->group) & info->dst_mask ? 1 : 0) ^
+	     ((info->flags & XT_DEVGROUP_INVERT_DST) ? 1 : 0)))
+		return false;
+
+	return true;
+}
+
+static int devgroup_mt_checkentry(const struct xt_mtchk_param *par)
+{
+	const struct xt_devgroup_info *info = par->matchinfo;
+
+	if (info->flags & ~(XT_DEVGROUP_MATCH_SRC | XT_DEVGROUP_INVERT_SRC |
+			    XT_DEVGROUP_MATCH_DST | XT_DEVGROUP_INVERT_DST))
+		return -EINVAL;
+
+	if (info->flags & XT_DEVGROUP_MATCH_SRC &&
+	    par->hook_mask & ~((1 << NF_INET_PRE_ROUTING) |
+			       (1 << NF_INET_LOCAL_IN) |
+			       (1 << NF_INET_FORWARD)))
+		return -EINVAL;
+
+	if (info->flags & XT_DEVGROUP_MATCH_DST &&
+	    par->hook_mask & ~((1 << NF_INET_FORWARD) |
+			       (1 << NF_INET_LOCAL_OUT) |
+			       (1 << NF_INET_POST_ROUTING)))
+		return -EINVAL;
+
+	return 0;
+}
+
+static struct xt_match devgroup_mt_reg __read_mostly = {
+	.name		= "devgroup",
+	.match		= devgroup_mt,
+	.checkentry	= devgroup_mt_checkentry,
+	.matchsize	= sizeof(struct xt_devgroup_info),
+	.family		= NFPROTO_UNSPEC,
+	.me		= THIS_MODULE
+};
+
+static int __init devgroup_mt_init(void)
+{
+	return xt_register_match(&devgroup_mt_reg);
+}
+
+static void __exit devgroup_mt_exit(void)
+{
+	xt_unregister_match(&devgroup_mt_reg);
+}
+
+module_init(devgroup_mt_init);
+module_exit(devgroup_mt_exit);
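The devgroup match added above tests the input/output device group against a configured group under a mask, optionally inverted. A small illustrative sketch of the masked comparison; the helper name is invented for the example and is not part of the patch:

	#include <stdbool.h>
	#include <stdint.h>

	/* Illustrative only: a device group matches a configured
	 * (group, mask) pair when the bits selected by the mask agree,
	 * i.e. ((group ^ dev_group) & mask) == 0.  devgroup_mt() then
	 * XORs this result with the corresponding INVERT flag. */
	static bool group_matches(uint32_t group, uint32_t mask, uint32_t dev_group)
	{
		return ((group ^ dev_group) & mask) == 0;
	}

	/* Example: group=0x10, mask=0xf0 matches any device whose group has
	 * 0x1 in bits 4-7, whatever the low nibble holds. */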
diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c
index 88f7c35..b46626c 100644
--- a/net/netfilter/xt_iprange.c
+++ b/net/netfilter/xt_iprange.c
@@ -31,7 +31,7 @@
 			pr_debug("src IP %pI4 NOT in range %s%pI4-%pI4\n",
 			         &iph->saddr,
 			         (info->flags & IPRANGE_SRC_INV) ? "(INV) " : "",
-			         &info->src_max.ip,
+			         &info->src_min.ip,
 			         &info->src_max.ip);
 			return false;
 		}
@@ -53,15 +53,13 @@
 }
 
 static inline int
-iprange_ipv6_sub(const struct in6_addr *a, const struct in6_addr *b)
+iprange_ipv6_lt(const struct in6_addr *a, const struct in6_addr *b)
 {
 	unsigned int i;
-	int r;
 
 	for (i = 0; i < 4; ++i) {
-		r = ntohl(a->s6_addr32[i]) - ntohl(b->s6_addr32[i]);
-		if (r != 0)
-			return r;
+		if (a->s6_addr32[i] != b->s6_addr32[i])
+			return ntohl(a->s6_addr32[i]) < ntohl(b->s6_addr32[i]);
 	}
 
 	return 0;
@@ -75,18 +73,30 @@
 	bool m;
 
 	if (info->flags & IPRANGE_SRC) {
-		m  = iprange_ipv6_sub(&iph->saddr, &info->src_min.in6) < 0;
-		m |= iprange_ipv6_sub(&iph->saddr, &info->src_max.in6) > 0;
+		m  = iprange_ipv6_lt(&iph->saddr, &info->src_min.in6);
+		m |= iprange_ipv6_lt(&info->src_max.in6, &iph->saddr);
 		m ^= !!(info->flags & IPRANGE_SRC_INV);
-		if (m)
+		if (m) {
+			pr_debug("src IP %pI6 NOT in range %s%pI6-%pI6\n",
+				 &iph->saddr,
+				 (info->flags & IPRANGE_SRC_INV) ? "(INV) " : "",
+				 &info->src_min.in6,
+				 &info->src_max.in6);
 			return false;
+		}
 	}
 	if (info->flags & IPRANGE_DST) {
-		m  = iprange_ipv6_sub(&iph->daddr, &info->dst_min.in6) < 0;
-		m |= iprange_ipv6_sub(&iph->daddr, &info->dst_max.in6) > 0;
+		m  = iprange_ipv6_lt(&iph->daddr, &info->dst_min.in6);
+		m |= iprange_ipv6_lt(&info->dst_max.in6, &iph->daddr);
 		m ^= !!(info->flags & IPRANGE_DST_INV);
-		if (m)
+		if (m) {
+			pr_debug("dst IP %pI6 NOT in range %s%pI6-%pI6\n",
+				 &iph->daddr,
+				 (info->flags & IPRANGE_DST_INV) ? "(INV) " : "",
+				 &info->dst_min.in6,
+				 &info->dst_max.in6);
 			return false;
+		}
 	}
 	return true;
 }
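The rewritten IPv6 helper compares the addresses one 32-bit word at a time instead of subtracting them; the old subtraction could wrap and report the wrong ordering when two words differ by more than INT_MAX. A self-contained sketch of that failure mode, illustrative only and assuming the usual two's-complement int:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t a = 0xffffffffu, b = 0x00000001u;

		/* old-style word comparison by subtraction */
		int r = (int)(a - b);		/* wraps to -2: claims a < b */

		/* direct comparison, as iprange_ipv6_lt() now does */
		printf("subtraction says %s\n", r < 0 ? "a < b" : "a >= b");
		printf("comparison  says %s\n", a < b ? "a < b" : "a >= b");
		return 0;
	}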
diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c
index 9127a3d..bb10b07 100644
--- a/net/netfilter/xt_ipvs.c
+++ b/net/netfilter/xt_ipvs.c
@@ -85,7 +85,7 @@
 	/*
 	 * Check if the packet belongs to an existing entry
 	 */
-	cp = pp->conn_out_get(family, skb, pp, &iph, iph.len, 1 /* inverse */);
+	cp = pp->conn_out_get(family, skb, &iph, iph.len, 1 /* inverse */);
 	if (unlikely(cp == NULL)) {
 		match = false;
 		goto out;
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
new file mode 100644
index 0000000..061d48c
--- /dev/null
+++ b/net/netfilter/xt_set.c
@@ -0,0 +1,359 @@
+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
+ *                         Patrick Schaaf <bof@bof.de>
+ *                         Martin Josefsson <gandalf@wlug.westbo.se>
+ * Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module which implements the set match and SET target
+ * for netfilter/iptables. */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_set.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("Xtables: IP set match and target module");
+MODULE_ALIAS("xt_SET");
+MODULE_ALIAS("ipt_set");
+MODULE_ALIAS("ip6t_set");
+MODULE_ALIAS("ipt_SET");
+MODULE_ALIAS("ip6t_SET");
+
+static inline int
+match_set(ip_set_id_t index, const struct sk_buff *skb,
+	  u8 pf, u8 dim, u8 flags, int inv)
+{
+	if (ip_set_test(index, skb, pf, dim, flags))
+		inv = !inv;
+	return inv;
+}
+
+/* Revision 0 interface: backward compatible with netfilter/iptables */
+
+static bool
+set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_set_info_match_v0 *info = par->matchinfo;
+
+	return match_set(info->match_set.index, skb, par->family,
+			 info->match_set.u.compat.dim,
+			 info->match_set.u.compat.flags,
+			 info->match_set.u.compat.flags & IPSET_INV_MATCH);
+}
+
+static void
+compat_flags(struct xt_set_info_v0 *info)
+{
+	u_int8_t i;
+
+	/* Fill out compatibility data according to enum ip_set_kopt */
+	info->u.compat.dim = IPSET_DIM_ZERO;
+	if (info->u.flags[0] & IPSET_MATCH_INV)
+		info->u.compat.flags |= IPSET_INV_MATCH;
+	for (i = 0; i < IPSET_DIM_MAX-1 && info->u.flags[i]; i++) {
+		info->u.compat.dim++;
+		if (info->u.flags[i] & IPSET_SRC)
+			info->u.compat.flags |= (1<<info->u.compat.dim);
+	}
+}
+
+static int
+set_match_v0_checkentry(const struct xt_mtchk_param *par)
+{
+	struct xt_set_info_match_v0 *info = par->matchinfo;
+	ip_set_id_t index;
+
+	index = ip_set_nfnl_get_byindex(info->match_set.index);
+
+	if (index == IPSET_INVALID_ID) {
+		pr_warning("Cannot find set identified by id %u to match\n",
+			   info->match_set.index);
+		return -ENOENT;
+	}
+	if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
+		pr_warning("Protocol error: set match dimension "
+			   "is over the limit!\n");
+		return -ERANGE;
+	}
+
+	/* Fill out compatibility data */
+	compat_flags(&info->match_set);
+
+	return 0;
+}
+
+static void
+set_match_v0_destroy(const struct xt_mtdtor_param *par)
+{
+	struct xt_set_info_match_v0 *info = par->matchinfo;
+
+	ip_set_nfnl_put(info->match_set.index);
+}
+
+static unsigned int
+set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_set_info_target_v0 *info = par->targinfo;
+
+	if (info->add_set.index != IPSET_INVALID_ID)
+		ip_set_add(info->add_set.index, skb, par->family,
+			   info->add_set.u.compat.dim,
+			   info->add_set.u.compat.flags);
+	if (info->del_set.index != IPSET_INVALID_ID)
+		ip_set_del(info->del_set.index, skb, par->family,
+			   info->del_set.u.compat.dim,
+			   info->del_set.u.compat.flags);
+
+	return XT_CONTINUE;
+}
+
+static int
+set_target_v0_checkentry(const struct xt_tgchk_param *par)
+{
+	struct xt_set_info_target_v0 *info = par->targinfo;
+	ip_set_id_t index;
+
+	if (info->add_set.index != IPSET_INVALID_ID) {
+		index = ip_set_nfnl_get_byindex(info->add_set.index);
+		if (index == IPSET_INVALID_ID) {
+			pr_warning("Cannot find add_set index %u as target\n",
+				   info->add_set.index);
+			return -ENOENT;
+		}
+	}
+
+	if (info->del_set.index != IPSET_INVALID_ID) {
+		index = ip_set_nfnl_get_byindex(info->del_set.index);
+		if (index == IPSET_INVALID_ID) {
+			pr_warning("Cannot find del_set index %u as target\n",
+				   info->del_set.index);
+			return -ENOENT;
+		}
+	}
+	if (info->add_set.u.flags[IPSET_DIM_MAX-1] != 0 ||
+	    info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
+		pr_warning("Protocol error: SET target dimension "
+			   "is over the limit!\n");
+		return -ERANGE;
+	}
+
+	/* Fill out compatibility data */
+	compat_flags(&info->add_set);
+	compat_flags(&info->del_set);
+
+	return 0;
+}
+
+static void
+set_target_v0_destroy(const struct xt_tgdtor_param *par)
+{
+	const struct xt_set_info_target_v0 *info = par->targinfo;
+
+	if (info->add_set.index != IPSET_INVALID_ID)
+		ip_set_nfnl_put(info->add_set.index);
+	if (info->del_set.index != IPSET_INVALID_ID)
+		ip_set_nfnl_put(info->del_set.index);
+}
+
+/* Revision 1: current interface to netfilter/iptables */
+
+static bool
+set_match(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_set_info_match *info = par->matchinfo;
+
+	return match_set(info->match_set.index, skb, par->family,
+			 info->match_set.dim,
+			 info->match_set.flags,
+			 info->match_set.flags & IPSET_INV_MATCH);
+}
+
+static int
+set_match_checkentry(const struct xt_mtchk_param *par)
+{
+	struct xt_set_info_match *info = par->matchinfo;
+	ip_set_id_t index;
+
+	index = ip_set_nfnl_get_byindex(info->match_set.index);
+
+	if (index == IPSET_INVALID_ID) {
+		pr_warning("Cannot find set identified by id %u to match\n",
+			   info->match_set.index);
+		return -ENOENT;
+	}
+	if (info->match_set.dim > IPSET_DIM_MAX) {
+		pr_warning("Protocol error: set match dimension "
+			   "is over the limit!\n");
+		return -ERANGE;
+	}
+
+	return 0;
+}
+
+static void
+set_match_destroy(const struct xt_mtdtor_param *par)
+{
+	struct xt_set_info_match *info = par->matchinfo;
+
+	ip_set_nfnl_put(info->match_set.index);
+}
+
+static unsigned int
+set_target(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_set_info_target *info = par->targinfo;
+
+	if (info->add_set.index != IPSET_INVALID_ID)
+		ip_set_add(info->add_set.index,
+			   skb, par->family,
+			   info->add_set.dim,
+			   info->add_set.flags);
+	if (info->del_set.index != IPSET_INVALID_ID)
+		ip_set_del(info->del_set.index,
+			   skb, par->family,
+			   info->del_set.dim,
+			   info->del_set.flags);
+
+	return XT_CONTINUE;
+}
+
+static int
+set_target_checkentry(const struct xt_tgchk_param *par)
+{
+	const struct xt_set_info_target *info = par->targinfo;
+	ip_set_id_t index;
+
+	if (info->add_set.index != IPSET_INVALID_ID) {
+		index = ip_set_nfnl_get_byindex(info->add_set.index);
+		if (index == IPSET_INVALID_ID) {
+			pr_warning("Cannot find add_set index %u as target\n",
+				   info->add_set.index);
+			return -ENOENT;
+		}
+	}
+
+	if (info->del_set.index != IPSET_INVALID_ID) {
+		index = ip_set_nfnl_get_byindex(info->del_set.index);
+		if (index == IPSET_INVALID_ID) {
+			pr_warning("Cannot find del_set index %u as target\n",
+				   info->del_set.index);
+			return -ENOENT;
+		}
+	}
+	if (info->add_set.dim > IPSET_DIM_MAX ||
+	    info->del_set.dim > IPSET_DIM_MAX) {
+		pr_warning("Protocol error: SET target dimension "
+			   "is over the limit!\n");
+		return -ERANGE;
+	}
+
+	return 0;
+}
+
+static void
+set_target_destroy(const struct xt_tgdtor_param *par)
+{
+	const struct xt_set_info_target *info = par->targinfo;
+
+	if (info->add_set.index != IPSET_INVALID_ID)
+		ip_set_nfnl_put(info->add_set.index);
+	if (info->del_set.index != IPSET_INVALID_ID)
+		ip_set_nfnl_put(info->del_set.index);
+}
+
+static struct xt_match set_matches[] __read_mostly = {
+	{
+		.name		= "set",
+		.family		= NFPROTO_IPV4,
+		.revision	= 0,
+		.match		= set_match_v0,
+		.matchsize	= sizeof(struct xt_set_info_match_v0),
+		.checkentry	= set_match_v0_checkentry,
+		.destroy	= set_match_v0_destroy,
+		.me		= THIS_MODULE
+	},
+	{
+		.name		= "set",
+		.family		= NFPROTO_IPV4,
+		.revision	= 1,
+		.match		= set_match,
+		.matchsize	= sizeof(struct xt_set_info_match),
+		.checkentry	= set_match_checkentry,
+		.destroy	= set_match_destroy,
+		.me		= THIS_MODULE
+	},
+	{
+		.name		= "set",
+		.family		= NFPROTO_IPV6,
+		.revision	= 1,
+		.match		= set_match,
+		.matchsize	= sizeof(struct xt_set_info_match),
+		.checkentry	= set_match_checkentry,
+		.destroy	= set_match_destroy,
+		.me		= THIS_MODULE
+	},
+};
+
+static struct xt_target set_targets[] __read_mostly = {
+	{
+		.name		= "SET",
+		.revision	= 0,
+		.family		= NFPROTO_IPV4,
+		.target		= set_target_v0,
+		.targetsize	= sizeof(struct xt_set_info_target_v0),
+		.checkentry	= set_target_v0_checkentry,
+		.destroy	= set_target_v0_destroy,
+		.me		= THIS_MODULE
+	},
+	{
+		.name		= "SET",
+		.revision	= 1,
+		.family		= NFPROTO_IPV4,
+		.target		= set_target,
+		.targetsize	= sizeof(struct xt_set_info_target),
+		.checkentry	= set_target_checkentry,
+		.destroy	= set_target_destroy,
+		.me		= THIS_MODULE
+	},
+	{
+		.name		= "SET",
+		.revision	= 1,
+		.family		= NFPROTO_IPV6,
+		.target		= set_target,
+		.targetsize	= sizeof(struct xt_set_info_target),
+		.checkentry	= set_target_checkentry,
+		.destroy	= set_target_destroy,
+		.me		= THIS_MODULE
+	},
+};
+
+static int __init xt_set_init(void)
+{
+	int ret = xt_register_matches(set_matches, ARRAY_SIZE(set_matches));
+
+	if (!ret) {
+		ret = xt_register_targets(set_targets,
+					  ARRAY_SIZE(set_targets));
+		if (ret)
+			xt_unregister_matches(set_matches,
+					      ARRAY_SIZE(set_matches));
+	}
+	return ret;
+}
+
+static void __exit xt_set_fini(void)
+{
+	xt_unregister_matches(set_matches, ARRAY_SIZE(set_matches));
+	xt_unregister_targets(set_targets, ARRAY_SIZE(set_targets));
+}
+
+module_init(xt_set_init);
+module_exit(xt_set_fini);
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 00d6ae83..9cc4635 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -35,6 +35,15 @@
 #include <net/netfilter/nf_conntrack.h>
 #endif
 
+static void
+xt_socket_put_sk(struct sock *sk)
+{
+	if (sk->sk_state == TCP_TIME_WAIT)
+		inet_twsk_put(inet_twsk(sk));
+	else
+		sock_put(sk);
+}
+
 static int
 extract_icmp4_fields(const struct sk_buff *skb,
 		    u8 *protocol,
@@ -164,7 +173,7 @@
 				       (sk->sk_state == TCP_TIME_WAIT &&
 					inet_twsk(sk)->tw_transparent));
 
-		nf_tproxy_put_sock(sk);
+		xt_socket_put_sk(sk);
 
 		if (wildcard || !transparent)
 			sk = NULL;
@@ -298,7 +307,7 @@
 				       (sk->sk_state == TCP_TIME_WAIT &&
 					inet_twsk(sk)->tw_transparent));
 
-		nf_tproxy_put_sock(sk);
+		xt_socket_put_sk(sk);
 
 		if (wildcard || !transparent)
 			sk = NULL;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index f83cb37..1781d99 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -519,7 +519,7 @@
 	    security_netlink_recv(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+	if (nlh->nlmsg_flags & NLM_F_DUMP) {
 		if (ops->dumpit == NULL)
 			return -EOPNOTSUPP;
 
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 91cb1d7..5efef5b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -164,7 +164,6 @@
 static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
 		int closing, int tx_ring);
 
-#define PGV_FROM_VMALLOC 1
 struct pgv {
 	char *buffer;
 };
@@ -466,7 +465,7 @@
 	 */
 
 	err = -EMSGSIZE;
-	if (len > dev->mtu + dev->hard_header_len)
+	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
 		goto out_unlock;
 
 	if (!skb) {
@@ -497,6 +496,19 @@
 		goto retry;
 	}
 
+	if (len > (dev->mtu + dev->hard_header_len)) {
+		/* Earlier code assumed this would be a VLAN pkt,
+		 * double-check this now that we have the actual
+		 * packet in hand.
+		 */
+		struct ethhdr *ehdr;
+		skb_reset_mac_header(skb);
+		ehdr = eth_hdr(skb);
+		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
+			err = -EMSGSIZE;
+			goto out_unlock;
+		}
+	}
 
 	skb->protocol = proto;
 	skb->dev = dev;
@@ -523,11 +535,11 @@
 {
 	struct sk_filter *filter;
 
-	rcu_read_lock_bh();
-	filter = rcu_dereference_bh(sk->sk_filter);
+	rcu_read_lock();
+	filter = rcu_dereference(sk->sk_filter);
 	if (filter != NULL)
 		res = sk_run_filter(skb, filter->insns);
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 
 	return res;
 }
@@ -1200,7 +1212,7 @@
 	}
 
 	err = -EMSGSIZE;
-	if (!gso_type && (len > dev->mtu+reserve))
+	if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))
 		goto out_unlock;
 
 	err = -ENOBUFS;
@@ -1225,6 +1237,20 @@
 	if (err < 0)
 		goto out_free;
 
+	if (!gso_type && (len > dev->mtu + reserve)) {
+		/* Earlier code assumed this would be a VLAN pkt,
+		 * double-check this now that we have the actual
+		 * packet in hand.
+		 */
+		struct ethhdr *ehdr;
+		skb_reset_mac_header(skb);
+		ehdr = eth_hdr(skb);
+		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
+			err = -EMSGSIZE;
+			goto out_free;
+		}
+	}
+
 	skb->protocol = proto;
 	skb->dev = dev;
 	skb->priority = sk->sk_priority;
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 9542449..da8adac 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -50,7 +50,6 @@
 #define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))
 
 #define RDS_CONG_MAP_BYTES	(65536 / 8)
-#define RDS_CONG_MAP_LONGS	(RDS_CONG_MAP_BYTES / sizeof(unsigned long))
 #define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
 #define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)
 
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index eaf7658..7fce6df 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -18,7 +18,7 @@
 	default y
 
 config RFKILL_INPUT
-	bool "RF switch input support" if EMBEDDED
+	bool "RF switch input support" if EXPERT
 	depends on RFKILL
 	depends on INPUT = y || RFKILL = INPUT
-	default y if !EMBEDDED
+	default y if !EXPERT
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index d952e7e..5ee0c62 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -803,7 +803,6 @@
 
 		rose_insert_socket(sk);		/* Finish the bind */
 	}
-rose_try_next_neigh:
 	rose->dest_addr   = addr->srose_addr;
 	rose->dest_call   = addr->srose_call;
 	rose->rand        = ((long)rose & 0xFFFF) + rose->lci;
@@ -865,12 +864,6 @@
 	}
 
 	if (sk->sk_state != TCP_ESTABLISHED) {
-	/* Try next neighbour */
-		rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause, &diagnostic, 0);
-		if (rose->neighbour)
-			goto rose_try_next_neigh;
-
-		/* No more neighbours */
 		sock->state = SS_UNCONNECTED;
 		err = sock_error(sk);	/* Always set at this point */
 		goto out_release;
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index b4fdaac..88a77e9 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -674,29 +674,34 @@
  *	Find a neighbour or a route given a ROSE address.
  */
 struct rose_neigh *rose_get_neigh(rose_address *addr, unsigned char *cause,
-	unsigned char *diagnostic, int new)
+	unsigned char *diagnostic, int route_frame)
 {
 	struct rose_neigh *res = NULL;
 	struct rose_node *node;
 	int failed = 0;
 	int i;
 
-	if (!new) spin_lock_bh(&rose_node_list_lock);
+	if (!route_frame) spin_lock_bh(&rose_node_list_lock);
 	for (node = rose_node_list; node != NULL; node = node->next) {
 		if (rosecmpm(addr, &node->address, node->mask) == 0) {
 			for (i = 0; i < node->count; i++) {
-				if (new) {
-					if (node->neighbour[i]->restarted) {
-						res = node->neighbour[i];
-						goto out;
-					}
+				if (node->neighbour[i]->restarted) {
+					res = node->neighbour[i];
+					goto out;
 				}
-				else {
+			}
+		}
+	}
+	if (!route_frame) { /* connect request */
+		for (node = rose_node_list; node != NULL; node = node->next) {
+			if (rosecmpm(addr, &node->address, node->mask) == 0) {
+				for (i = 0; i < node->count; i++) {
 					if (!rose_ftimer_running(node->neighbour[i])) {
 						res = node->neighbour[i];
+						failed = 0;
 						goto out;
-					} else
-						failed = 1;
+					}
+					failed = 1;
 				}
 			}
 		}
@@ -711,8 +716,7 @@
 	}
 
 out:
-	if (!new) spin_unlock_bh(&rose_node_list_lock);
-
+	if (!route_frame) spin_unlock_bh(&rose_node_list_lock);
 	return res;
 }
 
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index f04d4a4..8c19b6e 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -205,6 +205,29 @@
 
 	  If unsure, say N.
 
+config NET_SCH_MQPRIO
+	tristate "Multi-queue priority scheduler (MQPRIO)"
+	help
+	  Say Y here if you want to use the Multi-queue Priority scheduler.
+	  This scheduler allows QOS to be offloaded on NICs that have support
+	  for offloading QOS schedulers.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called sch_mqprio.
+
+	  If unsure, say N.
+
+config NET_SCH_CHOKE
+	tristate "CHOose and Keep responsive flow scheduler (CHOKE)"
+	help
+	  Say Y here if you want to use the CHOKe packet scheduler (CHOose
+	  and Keep for responsive flows, CHOose and Kill for unresponsive
+	  flows). This is a variation of RED which tries to penalize flows
+	  that monopolize the queue.
+
+	  To compile this code as a module, choose M here: the
+	  module will be called sch_choke.
+
 config NET_SCH_INGRESS
 	tristate "Ingress Qdisc"
 	depends on NET_CLS_ACT
@@ -243,7 +266,7 @@
 
 config NET_CLS_ROUTE4
 	tristate "Routing decision (ROUTE)"
-	select NET_CLS_ROUTE
+	select IP_ROUTE_CLASSID
 	select NET_CLS
 	---help---
 	  If you say Y here, you will be able to classify packets
@@ -252,9 +275,6 @@
 	  To compile this code as a module, choose M here: the
 	  module will be called cls_route.
 
-config NET_CLS_ROUTE
-	bool
-
 config NET_CLS_FW
 	tristate "Netfilter mark (FW)"
 	select NET_CLS
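The NET_SCH_CHOKE entry above describes CHOKe as a RED variant that penalizes flows monopolizing the queue. The core idea: when the queue is congested, compare the arriving packet against one packet drawn at random from the queue; a flow occupying many slots is likely to be hit, and both packets are then dropped. A toy, self-contained sketch of that decision (illustrative only, not the sch_choke implementation; the real qdisc keys on RED's average queue length and full flow tuples rather than a fixed threshold and integer flow ids):

	#include <stdbool.h>
	#include <stdlib.h>

	#define CHOKE_CONGESTED	8	/* toy threshold, stands in for RED's qavg test */
	#define CHOKE_CAPACITY	64

	struct choke_queue {
		int flow[CHOKE_CAPACITY];	/* flow id of each queued packet */
		int len;
	};

	/* Returns true if the arriving packet was enqueued, false if dropped. */
	static bool choke_enqueue(struct choke_queue *q, int flow)
	{
		if (q->len >= CHOKE_CONGESTED) {
			int victim = rand() % q->len;	/* random queued packet */

			if (q->flow[victim] == flow) {
				/* same flow: drop the queued packet and the new one */
				q->flow[victim] = q->flow[--q->len];
				return false;
			}
		}
		if (q->len == CHOKE_CAPACITY)
			return false;			/* tail drop when full */
		q->flow[q->len++] = flow;
		return true;
	}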
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 960f5db..06c6cdf 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -32,6 +32,9 @@
 obj-$(CONFIG_NET_SCH_ATM)	+= sch_atm.o
 obj-$(CONFIG_NET_SCH_NETEM)	+= sch_netem.o
 obj-$(CONFIG_NET_SCH_DRR)	+= sch_drr.o
+obj-$(CONFIG_NET_SCH_MQPRIO)	+= sch_mqprio.o
+obj-$(CONFIG_NET_SCH_CHOKE)	+= sch_choke.o
+
 obj-$(CONFIG_NET_CLS_U32)	+= cls_u32.o
 obj-$(CONFIG_NET_CLS_ROUTE4)	+= cls_route.o
 obj-$(CONFIG_NET_CLS_FW)	+= cls_fw.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 23b25f8..15873e1 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -78,7 +78,7 @@
 			   struct tc_action *a, struct tcf_hashinfo *hinfo)
 {
 	struct tcf_common *p;
-	int err = 0, index = -1,i = 0, s_i = 0, n_i = 0;
+	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
 	struct nlattr *nest;
 
 	read_lock_bh(hinfo->lock);
@@ -126,7 +126,7 @@
 {
 	struct tcf_common *p, *s_p;
 	struct nlattr *nest;
-	int i= 0, n_i = 0;
+	int i = 0, n_i = 0;
 
 	nest = nla_nest_start(skb, a->order);
 	if (nest == NULL)
@@ -138,7 +138,7 @@
 		while (p != NULL) {
 			s_p = p->tcfc_next;
 			if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
-				 module_put(a->ops->owner);
+				module_put(a->ops->owner);
 			n_i++;
 			p = s_p;
 		}
@@ -447,7 +447,8 @@
 	nest = nla_nest_start(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
-	if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
+	err = tcf_action_dump_old(skb, a, bind, ref);
+	if (err > 0) {
 		nla_nest_end(skb, nest);
 		return err;
 	}
@@ -491,7 +492,7 @@
 	struct tc_action *a;
 	struct tc_action_ops *a_o;
 	char act_name[IFNAMSIZ];
-	struct nlattr *tb[TCA_ACT_MAX+1];
+	struct nlattr *tb[TCA_ACT_MAX + 1];
 	struct nlattr *kind;
 	int err;
 
@@ -549,9 +550,9 @@
 		goto err_free;
 
 	/* module count goes up only when brand new policy is created
-	   if it exists and is only bound to in a_o->init() then
-	   ACT_P_CREATED is not returned (a zero is).
-	*/
+	 * if it exists and is only bound to in a_o->init() then
+	 * ACT_P_CREATED is not returned (a zero is).
+	 */
 	if (err != ACT_P_CREATED)
 		module_put(a_o->owner);
 	a->ops = a_o;
@@ -569,7 +570,7 @@
 struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
 				  char *name, int ovr, int bind)
 {
-	struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
+	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
 	struct tc_action *head = NULL, *act, *act_prev = NULL;
 	int err;
 	int i;
@@ -697,7 +698,7 @@
 static struct tc_action *
 tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
 {
-	struct nlattr *tb[TCA_ACT_MAX+1];
+	struct nlattr *tb[TCA_ACT_MAX + 1];
 	struct tc_action *a;
 	int index;
 	int err;
@@ -770,7 +771,7 @@
 	struct tcamsg *t;
 	struct netlink_callback dcb;
 	struct nlattr *nest;
-	struct nlattr *tb[TCA_ACT_MAX+1];
+	struct nlattr *tb[TCA_ACT_MAX + 1];
 	struct nlattr *kind;
 	struct tc_action *a = create_a(0);
 	int err = -ENOMEM;
@@ -821,7 +822,8 @@
 	nlh->nlmsg_flags |= NLM_F_ROOT;
 	module_put(a->ops->owner);
 	kfree(a);
-	err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+	err = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+			     n->nlmsg_flags & NLM_F_ECHO);
 	if (err > 0)
 		return 0;
 
@@ -842,14 +844,14 @@
 	      u32 pid, int event)
 {
 	int i, ret;
-	struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
+	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
 	struct tc_action *head = NULL, *act, *act_prev = NULL;
 
 	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
 	if (ret < 0)
 		return ret;
 
-	if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
+	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
 		if (tb[1] != NULL)
 			return tca_action_flush(net, tb[1], n, pid);
 		else
@@ -892,7 +894,7 @@
 		/* now do the delete */
 		tcf_action_destroy(head, 0);
 		ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
-				     n->nlmsg_flags&NLM_F_ECHO);
+				     n->nlmsg_flags & NLM_F_ECHO);
 		if (ret > 0)
 			return 0;
 		return ret;
@@ -936,7 +938,7 @@
 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
 	NETLINK_CB(skb).dst_group = RTNLGRP_TC;
 
-	err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
+	err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO);
 	if (err > 0)
 		err = 0;
 	return err;
@@ -967,7 +969,7 @@
 
 	/* dump then free all the actions after update; inserted policy
 	 * stays intact
-	 * */
+	 */
 	ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
 	for (a = act; a; a = act) {
 		act = a->next;
@@ -993,8 +995,7 @@
 		return -EINVAL;
 	}
 
-	/* n->nlmsg_flags&NLM_F_CREATE
-	 * */
+	/* n->nlmsg_flags & NLM_F_CREATE */
 	switch (n->nlmsg_type) {
 	case RTM_NEWACTION:
 		/* we are going to assume all other flags
@@ -1003,7 +1004,7 @@
 		 * but since we want avoid ambiguity (eg when flags
 		 * is zero) then just set this
 		 */
-		if (n->nlmsg_flags&NLM_F_REPLACE)
+		if (n->nlmsg_flags & NLM_F_REPLACE)
 			ovr = 1;
 replay:
 		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
@@ -1028,7 +1029,7 @@
 static struct nlattr *
 find_dump_kind(const struct nlmsghdr *n)
 {
-	struct nlattr *tb1, *tb2[TCA_ACT_MAX+1];
+	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
 	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
 	struct nlattr *nla[TCAA_MAX + 1];
 	struct nlattr *kind;
@@ -1071,9 +1072,8 @@
 	}
 
 	a_o = tc_lookup_action(kind);
-	if (a_o == NULL) {
+	if (a_o == NULL)
 		return 0;
-	}
 
 	memset(&a, 0, sizeof(struct tc_action));
 	a.ops = a_o;
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 83ddfc0..6cdf9ab 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -63,7 +63,7 @@
 	if (nla == NULL)
 		return -EINVAL;
 
-	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla,csum_policy);
+	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
 	if (err < 0)
 		return err;
 
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index c2ed90a..2b4ab4b 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -50,7 +50,7 @@
 }
 
 typedef int (*g_rand)(struct tcf_gact *gact);
-static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ };
+static g_rand gact_rand[MAX_RAND] = { NULL, gact_net_rand, gact_determ };
 #endif /* CONFIG_GACT_PROB */
 
 static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
@@ -89,7 +89,7 @@
 		pc = tcf_hash_create(parm->index, est, a, sizeof(*gact),
 				     bind, &gact_idx_gen, &gact_hash_info);
 		if (IS_ERR(pc))
-		    return PTR_ERR(pc);
+			return PTR_ERR(pc);
 		ret = ACT_P_CREATED;
 	} else {
 		if (!ovr) {
@@ -205,9 +205,9 @@
 static int __init gact_init_module(void)
 {
 #ifdef CONFIG_GACT_PROB
-	printk(KERN_INFO "GACT probability on\n");
+	pr_info("GACT probability on\n");
 #else
-	printk(KERN_INFO "GACT probability NOT on\n");
+	pr_info("GACT probability NOT on\n");
 #endif
 	return tcf_register_action(&act_gact_ops);
 }
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index c2a7c20..9fc211a 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -138,7 +138,7 @@
 		pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind,
 				     &ipt_idx_gen, &ipt_hash_info);
 		if (IS_ERR(pc))
-		    return PTR_ERR(pc);
+			return PTR_ERR(pc);
 		ret = ACT_P_CREATED;
 	} else {
 		if (!ovr) {
@@ -162,7 +162,8 @@
 	if (unlikely(!t))
 		goto err2;
 
-	if ((err = ipt_init_target(t, tname, hook)) < 0)
+	err = ipt_init_target(t, tname, hook);
+	if (err < 0)
 		goto err3;
 
 	spin_lock_bh(&ipt->tcf_lock);
@@ -212,8 +213,9 @@
 	bstats_update(&ipt->tcf_bstats, skb);
 
 	/* yes, we have to worry about both in and out dev
-	 worry later - danger - this API seems to have changed
-	 from earlier kernels */
+	 * worry later - danger - this API seems to have changed
+	 * from earlier kernels
+	 */
 	par.in       = skb->dev;
 	par.out      = NULL;
 	par.hooknum  = ipt->tcfi_hook;
@@ -253,9 +255,9 @@
 	struct tc_cnt c;
 
 	/* for simple targets kernel size == user size
-	** user name = target name
-	** for foolproof you need to not assume this
-	*/
+	 * user name = target name
+	 * for foolproof you need to not assume this
+	 */
 
 	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
 	if (unlikely(!t))
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index d765067..961386e 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -41,13 +41,13 @@
 	.lock	=	&mirred_lock,
 };
 
-static inline int tcf_mirred_release(struct tcf_mirred *m, int bind)
+static int tcf_mirred_release(struct tcf_mirred *m, int bind)
 {
 	if (m) {
 		if (bind)
 			m->tcf_bindcnt--;
 		m->tcf_refcnt--;
-		if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
+		if (!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
 			list_del(&m->tcfm_list);
 			if (m->tcfm_dev)
 				dev_put(m->tcfm_dev);
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 178a4bd..762b027 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -69,7 +69,7 @@
 		pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
 				     &nat_idx_gen, &nat_hash_info);
 		if (IS_ERR(pc))
-		    return PTR_ERR(pc);
+			return PTR_ERR(pc);
 		p = to_tcf_nat(pc);
 		ret = ACT_P_CREATED;
 	} else {
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 445bef7..50c7c06 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -70,7 +70,7 @@
 		pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
 				     &pedit_idx_gen, &pedit_hash_info);
 		if (IS_ERR(pc))
-		    return PTR_ERR(pc);
+			return PTR_ERR(pc);
 		p = to_pedit(pc);
 		keys = kmalloc(ksize, GFP_KERNEL);
 		if (keys == NULL) {
@@ -127,11 +127,9 @@
 	int i, munged = 0;
 	unsigned int off;
 
-	if (skb_cloned(skb)) {
-		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
-			return p->tcf_action;
-		}
-	}
+	if (skb_cloned(skb) &&
+	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+		return p->tcf_action;
 
 	off = skb_network_offset(skb);
 
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index e2f08b1..8a16307 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -22,8 +22,8 @@
 #include <net/act_api.h>
 #include <net/netlink.h>
 
-#define L2T(p,L)   qdisc_l2t((p)->tcfp_R_tab, L)
-#define L2T_P(p,L) qdisc_l2t((p)->tcfp_P_tab, L)
+#define L2T(p, L)   qdisc_l2t((p)->tcfp_R_tab, L)
+#define L2T_P(p, L) qdisc_l2t((p)->tcfp_P_tab, L)
 
 #define POL_TAB_MASK     15
 static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
@@ -37,8 +37,7 @@
 };
 
 /* old policer structure from before tc actions */
-struct tc_police_compat
-{
+struct tc_police_compat {
 	u32			index;
 	int			action;
 	u32			limit;
@@ -139,7 +138,7 @@
 static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est,
 				 struct tc_action *a, int ovr, int bind)
 {
-	unsigned h;
+	unsigned int h;
 	int ret = 0, err;
 	struct nlattr *tb[TCA_POLICE_MAX + 1];
 	struct tc_police *parm;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 7287cff..a34a22d 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -47,7 +47,7 @@
 	/* print policy string followed by _ then packet count
 	 * Example if this was the 3rd packet and the string was "hello"
 	 * then it would look like "hello_3" (without quotes)
-	 **/
+	 */
 	pr_info("simple: %s_%d\n",
 	       (char *)d->tcfd_defdata, d->tcf_bstats.packets);
 	spin_unlock(&d->tcf_lock);
@@ -125,7 +125,7 @@
 		pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
 				     &simp_idx_gen, &simp_hash_info);
 		if (IS_ERR(pc))
-		    return PTR_ERR(pc);
+			return PTR_ERR(pc);
 
 		d = to_defact(pc);
 		ret = alloc_defdata(d, defdata);
@@ -149,7 +149,7 @@
 	return ret;
 }
 
-static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
+static int tcf_simp_cleanup(struct tc_action *a, int bind)
 {
 	struct tcf_defact *d = a->priv;
 
@@ -158,8 +158,8 @@
 	return 0;
 }
 
-static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
-				int bind, int ref)
+static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
+			 int bind, int ref)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_defact *d = a->priv;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 836f5fe..5f6f0c7 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -113,7 +113,7 @@
 		pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
 				     &skbedit_idx_gen, &skbedit_hash_info);
 		if (IS_ERR(pc))
-		    return PTR_ERR(pc);
+			return PTR_ERR(pc);
 
 		d = to_skbedit(pc);
 		ret = ACT_P_CREATED;
@@ -144,7 +144,7 @@
 	return ret;
 }
 
-static inline int tcf_skbedit_cleanup(struct tc_action *a, int bind)
+static int tcf_skbedit_cleanup(struct tc_action *a, int bind)
 {
 	struct tcf_skbedit *d = a->priv;
 
@@ -153,8 +153,8 @@
 	return 0;
 }
 
-static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
-				int bind, int ref)
+static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
+			    int bind, int ref)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_skbedit *d = a->priv;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 5fd0c28..bb2c523 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -85,7 +85,7 @@
 	int rc = -ENOENT;
 
 	write_lock(&cls_mod_lock);
-	for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next)
+	for (tp = &tcf_proto_base; (t = *tp) != NULL; tp = &t->next)
 		if (t == ops)
 			break;
 
@@ -111,7 +111,7 @@
 	u32 first = TC_H_MAKE(0xC0000000U, 0U);
 
 	if (tp)
-		first = tp->prio-1;
+		first = tp->prio - 1;
 
 	return first;
 }
@@ -149,7 +149,8 @@
 
 	if (prio == 0) {
 		/* If no priority is given, user wants we allocated it. */
-		if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
+		if (n->nlmsg_type != RTM_NEWTFILTER ||
+		    !(n->nlmsg_flags & NLM_F_CREATE))
 			return -ENOENT;
 		prio = TC_H_MAKE(0x80000000U, 0U);
 	}
@@ -176,7 +177,8 @@
 	}
 
 	/* Is it classful? */
-	if ((cops = q->ops->cl_ops) == NULL)
+	cops = q->ops->cl_ops;
+	if (!cops)
 		return -EINVAL;
 
 	if (cops->tcf_chain == NULL)
@@ -196,10 +198,11 @@
 		goto errout;
 
 	/* Check the chain for existence of proto-tcf with this priority */
-	for (back = chain; (tp=*back) != NULL; back = &tp->next) {
+	for (back = chain; (tp = *back) != NULL; back = &tp->next) {
 		if (tp->prio >= prio) {
 			if (tp->prio == prio) {
-				if (!nprio || (tp->protocol != protocol && protocol))
+				if (!nprio ||
+				    (tp->protocol != protocol && protocol))
 					goto errout;
 			} else
 				tp = NULL;
@@ -216,7 +219,8 @@
 			goto errout;
 
 		err = -ENOENT;
-		if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
+		if (n->nlmsg_type != RTM_NEWTFILTER ||
+		    !(n->nlmsg_flags & NLM_F_CREATE))
 			goto errout;
 
 
@@ -420,7 +424,8 @@
 
 	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
 		return skb->len;
-	if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+	if (!dev)
 		return skb->len;
 
 	if (!tcm->tcm_parent)
@@ -429,7 +434,8 @@
 		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
 	if (!q)
 		goto out;
-	if ((cops = q->ops->cl_ops) == NULL)
+	cops = q->ops->cl_ops;
+	if (!cops)
 		goto errout;
 	if (cops->tcf_chain == NULL)
 		goto errout;
@@ -444,8 +450,9 @@
 
 	s_t = cb->args[0];
 
-	for (tp=*chain, t=0; tp; tp = tp->next, t++) {
-		if (t < s_t) continue;
+	for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
+		if (t < s_t)
+			continue;
 		if (TC_H_MAJ(tcm->tcm_info) &&
 		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
 			continue;
@@ -468,10 +475,10 @@
 		arg.skb = skb;
 		arg.cb = cb;
 		arg.w.stop = 0;
-		arg.w.skip = cb->args[1]-1;
+		arg.w.skip = cb->args[1] - 1;
 		arg.w.count = 0;
 		tp->ops->walk(tp, &arg.w);
-		cb->args[1] = arg.w.count+1;
+		cb->args[1] = arg.w.count + 1;
 		if (arg.w.stop)
 			break;
 	}
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index f23d915..8be8872 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -21,14 +21,12 @@
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
 
-struct basic_head
-{
+struct basic_head {
 	u32			hgenerator;
 	struct list_head	flist;
 };
 
-struct basic_filter
-{
+struct basic_filter {
 	u32			handle;
 	struct tcf_exts		exts;
 	struct tcf_ematch_tree	ematches;
@@ -92,8 +90,7 @@
 	return 0;
 }
 
-static inline void basic_delete_filter(struct tcf_proto *tp,
-				       struct basic_filter *f)
+static void basic_delete_filter(struct tcf_proto *tp, struct basic_filter *f)
 {
 	tcf_unbind_filter(tp, &f->res);
 	tcf_exts_destroy(tp, &f->exts);
@@ -135,9 +132,9 @@
 	[TCA_BASIC_EMATCHES]	= { .type = NLA_NESTED },
 };
 
-static inline int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
-				  unsigned long base, struct nlattr **tb,
-				  struct nlattr *est)
+static int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
+			   unsigned long base, struct nlattr **tb,
+			   struct nlattr *est)
 {
 	int err = -EINVAL;
 	struct tcf_exts e;
@@ -203,7 +200,7 @@
 		} while (--i > 0 && basic_get(tp, head->hgenerator));
 
 		if (i <= 0) {
-			printk(KERN_ERR "Insufficient number of handles\n");
+			pr_err("Insufficient number of handles\n");
 			goto errout;
 		}
 
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index d49c40f..32a3351 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -56,7 +56,8 @@
 {
 	struct cgroup_cls_state *cs;
 
-	if (!(cs = kzalloc(sizeof(*cs), GFP_KERNEL)))
+	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+	if (!cs)
 		return ERR_PTR(-ENOMEM);
 
 	if (cgrp->parent)
@@ -94,8 +95,7 @@
 	return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
 }
 
-struct cls_cgroup_head
-{
+struct cls_cgroup_head {
 	u32			handle;
 	struct tcf_exts		exts;
 	struct tcf_ematch_tree	ematches;
@@ -166,7 +166,7 @@
 			     u32 handle, struct nlattr **tca,
 			     unsigned long *arg)
 {
-	struct nlattr *tb[TCA_CGROUP_MAX+1];
+	struct nlattr *tb[TCA_CGROUP_MAX + 1];
 	struct cls_cgroup_head *head = tp->root;
 	struct tcf_ematch_tree t;
 	struct tcf_exts e;
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 5b271a1..8ec0139 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -121,7 +121,7 @@
 		if (!pskb_network_may_pull(skb, sizeof(*iph)))
 			break;
 		iph = ip_hdr(skb);
-		if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+		if (iph->frag_off & htons(IP_MF | IP_OFFSET))
 			break;
 		poff = proto_ports_offset(iph->protocol);
 		if (poff >= 0 &&
@@ -163,7 +163,7 @@
 		if (!pskb_network_may_pull(skb, sizeof(*iph)))
 			break;
 		iph = ip_hdr(skb);
-		if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+		if (iph->frag_off & htons(IP_MF | IP_OFFSET))
 			break;
 		poff = proto_ports_offset(iph->protocol);
 		if (poff >= 0 &&
@@ -276,7 +276,7 @@
 
 static u32 flow_get_rtclassid(const struct sk_buff *skb)
 {
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	if (skb_dst(skb))
 		return skb_dst(skb)->tclassid;
 #endif
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 93b0a7b..26e7bc4 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -31,14 +31,12 @@
 
 #define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *))
 
-struct fw_head
-{
+struct fw_head {
 	struct fw_filter *ht[HTSIZE];
 	u32 mask;
 };
 
-struct fw_filter
-{
+struct fw_filter {
 	struct fw_filter	*next;
 	u32			id;
 	struct tcf_result	res;
@@ -53,7 +51,7 @@
 	.police = TCA_FW_POLICE
 };
 
-static __inline__ int fw_hash(u32 handle)
+static inline int fw_hash(u32 handle)
 {
 	if (HTSIZE == 4096)
 		return ((handle >> 24) & 0xFFF) ^
@@ -82,14 +80,14 @@
 static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
 			  struct tcf_result *res)
 {
-	struct fw_head *head = (struct fw_head*)tp->root;
+	struct fw_head *head = (struct fw_head *)tp->root;
 	struct fw_filter *f;
 	int r;
 	u32 id = skb->mark;
 
 	if (head != NULL) {
 		id &= head->mask;
-		for (f=head->ht[fw_hash(id)]; f; f=f->next) {
+		for (f = head->ht[fw_hash(id)]; f; f = f->next) {
 			if (f->id == id) {
 				*res = f->res;
 #ifdef CONFIG_NET_CLS_IND
@@ -105,7 +103,8 @@
 		}
 	} else {
 		/* old method */
-		if (id && (TC_H_MAJ(id) == 0 || !(TC_H_MAJ(id^tp->q->handle)))) {
+		if (id && (TC_H_MAJ(id) == 0 ||
+			   !(TC_H_MAJ(id ^ tp->q->handle)))) {
 			res->classid = id;
 			res->class = 0;
 			return 0;
@@ -117,13 +116,13 @@
 
 static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
 {
-	struct fw_head *head = (struct fw_head*)tp->root;
+	struct fw_head *head = (struct fw_head *)tp->root;
 	struct fw_filter *f;
 
 	if (head == NULL)
 		return 0;
 
-	for (f=head->ht[fw_hash(handle)]; f; f=f->next) {
+	for (f = head->ht[fw_hash(handle)]; f; f = f->next) {
 		if (f->id == handle)
 			return (unsigned long)f;
 	}
@@ -139,8 +138,7 @@
 	return 0;
 }
 
-static inline void
-fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
+static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
 {
 	tcf_unbind_filter(tp, &f->res);
 	tcf_exts_destroy(tp, &f->exts);
@@ -156,8 +154,8 @@
 	if (head == NULL)
 		return;
 
-	for (h=0; h<HTSIZE; h++) {
-		while ((f=head->ht[h]) != NULL) {
+	for (h = 0; h < HTSIZE; h++) {
+		while ((f = head->ht[h]) != NULL) {
 			head->ht[h] = f->next;
 			fw_delete_filter(tp, f);
 		}
@@ -167,14 +165,14 @@
 
 static int fw_delete(struct tcf_proto *tp, unsigned long arg)
 {
-	struct fw_head *head = (struct fw_head*)tp->root;
-	struct fw_filter *f = (struct fw_filter*)arg;
+	struct fw_head *head = (struct fw_head *)tp->root;
+	struct fw_filter *f = (struct fw_filter *)arg;
 	struct fw_filter **fp;
 
 	if (head == NULL || f == NULL)
 		goto out;
 
-	for (fp=&head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
+	for (fp = &head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
 		if (*fp == f) {
 			tcf_tree_lock(tp);
 			*fp = f->next;
@@ -240,7 +238,7 @@
 		     struct nlattr **tca,
 		     unsigned long *arg)
 {
-	struct fw_head *head = (struct fw_head*)tp->root;
+	struct fw_head *head = (struct fw_head *)tp->root;
 	struct fw_filter *f = (struct fw_filter *) *arg;
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_FW_MAX + 1];
@@ -302,7 +300,7 @@
 
 static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
-	struct fw_head *head = (struct fw_head*)tp->root;
+	struct fw_head *head = (struct fw_head *)tp->root;
 	int h;
 
 	if (head == NULL)
@@ -332,7 +330,7 @@
 		   struct sk_buff *skb, struct tcmsg *t)
 {
 	struct fw_head *head = (struct fw_head *)tp->root;
-	struct fw_filter *f = (struct fw_filter*)fh;
+	struct fw_filter *f = (struct fw_filter *)fh;
 	unsigned char *b = skb_tail_pointer(skb);
 	struct nlattr *nest;
 
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 694dcd8..d580cdf 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -23,34 +23,30 @@
 #include <net/pkt_cls.h>
 
 /*
-   1. For now we assume that route tags < 256.
-      It allows to use direct table lookups, instead of hash tables.
-   2. For now we assume that "from TAG" and "fromdev DEV" statements
-      are mutually  exclusive.
-   3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
+ * 1. For now we assume that route tags < 256.
+ *    It allows to use direct table lookups, instead of hash tables.
+ * 2. For now we assume that "from TAG" and "fromdev DEV" statements
+ *    are mutually  exclusive.
+ * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
  */
 
-struct route4_fastmap
-{
+struct route4_fastmap {
 	struct route4_filter	*filter;
 	u32			id;
 	int			iif;
 };
 
-struct route4_head
-{
+struct route4_head {
 	struct route4_fastmap	fastmap[16];
-	struct route4_bucket	*table[256+1];
+	struct route4_bucket	*table[256 + 1];
 };
 
-struct route4_bucket
-{
+struct route4_bucket {
 	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
-	struct route4_filter	*ht[16+16+1];
+	struct route4_filter	*ht[16 + 16 + 1];
 };
 
-struct route4_filter
-{
+struct route4_filter {
 	struct route4_filter	*next;
 	u32			id;
 	int			iif;
@@ -61,20 +57,20 @@
 	struct route4_bucket	*bkt;
 };
 
-#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
+#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
 
 static const struct tcf_ext_map route_ext_map = {
 	.police = TCA_ROUTE4_POLICE,
 	.action = TCA_ROUTE4_ACT
 };
 
-static __inline__ int route4_fastmap_hash(u32 id, int iif)
+static inline int route4_fastmap_hash(u32 id, int iif)
 {
-	return id&0xF;
+	return id & 0xF;
 }
 
-static inline
-void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
+static void
+route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
 {
 	spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
 
@@ -83,32 +79,33 @@
 	spin_unlock_bh(root_lock);
 }
 
-static inline void
+static void
 route4_set_fastmap(struct route4_head *head, u32 id, int iif,
 		   struct route4_filter *f)
 {
 	int h = route4_fastmap_hash(id, iif);
+
 	head->fastmap[h].id = id;
 	head->fastmap[h].iif = iif;
 	head->fastmap[h].filter = f;
 }
 
-static __inline__ int route4_hash_to(u32 id)
+static inline int route4_hash_to(u32 id)
 {
-	return id&0xFF;
+	return id & 0xFF;
 }
 
-static __inline__ int route4_hash_from(u32 id)
+static inline int route4_hash_from(u32 id)
 {
-	return (id>>16)&0xF;
+	return (id >> 16) & 0xF;
 }
 
-static __inline__ int route4_hash_iif(int iif)
+static inline int route4_hash_iif(int iif)
 {
-	return 16 + ((iif>>16)&0xF);
+	return 16 + ((iif >> 16) & 0xF);
 }
 
-static __inline__ int route4_hash_wild(void)
+static inline int route4_hash_wild(void)
 {
 	return 32;
 }
@@ -131,21 +128,22 @@
 static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
 			   struct tcf_result *res)
 {
-	struct route4_head *head = (struct route4_head*)tp->root;
+	struct route4_head *head = (struct route4_head *)tp->root;
 	struct dst_entry *dst;
 	struct route4_bucket *b;
 	struct route4_filter *f;
 	u32 id, h;
 	int iif, dont_cache = 0;
 
-	if ((dst = skb_dst(skb)) == NULL)
+	dst = skb_dst(skb);
+	if (!dst)
 		goto failure;
 
 	id = dst->tclassid;
 	if (head == NULL)
 		goto old_method;
 
-	iif = ((struct rtable*)dst)->fl.iif;
+	iif = ((struct rtable *)dst)->fl.iif;
 
 	h = route4_fastmap_hash(id, iif);
 	if (id == head->fastmap[h].id &&
@@ -161,7 +159,8 @@
 	h = route4_hash_to(id);
 
 restart:
-	if ((b = head->table[h]) != NULL) {
+	b = head->table[h];
+	if (b) {
 		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
 			if (f->id == id)
 				ROUTE4_APPLY_RESULT();
@@ -197,8 +196,9 @@
 
 static inline u32 to_hash(u32 id)
 {
-	u32 h = id&0xFF;
-	if (id&0x8000)
+	u32 h = id & 0xFF;
+
+	if (id & 0x8000)
 		h += 256;
 	return h;
 }
@@ -211,17 +211,17 @@
 	if (!(id & 0x8000)) {
 		if (id > 255)
 			return 256;
-		return id&0xF;
+		return id & 0xF;
 	}
-	return 16 + (id&0xF);
+	return 16 + (id & 0xF);
 }
 
 static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
 {
-	struct route4_head *head = (struct route4_head*)tp->root;
+	struct route4_head *head = (struct route4_head *)tp->root;
 	struct route4_bucket *b;
 	struct route4_filter *f;
-	unsigned h1, h2;
+	unsigned int h1, h2;
 
 	if (!head)
 		return 0;
@@ -230,11 +230,12 @@
 	if (h1 > 256)
 		return 0;
 
-	h2 = from_hash(handle>>16);
+	h2 = from_hash(handle >> 16);
 	if (h2 > 32)
 		return 0;
 
-	if ((b = head->table[h1]) != NULL) {
+	b = head->table[h1];
+	if (b) {
 		for (f = b->ht[h2]; f; f = f->next)
 			if (f->handle == handle)
 				return (unsigned long)f;
@@ -251,7 +252,7 @@
 	return 0;
 }
 
-static inline void
+static void
 route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
 {
 	tcf_unbind_filter(tp, &f->res);
@@ -267,11 +268,12 @@
 	if (head == NULL)
 		return;
 
-	for (h1=0; h1<=256; h1++) {
+	for (h1 = 0; h1 <= 256; h1++) {
 		struct route4_bucket *b;
 
-		if ((b = head->table[h1]) != NULL) {
-			for (h2=0; h2<=32; h2++) {
+		b = head->table[h1];
+		if (b) {
+			for (h2 = 0; h2 <= 32; h2++) {
 				struct route4_filter *f;
 
 				while ((f = b->ht[h2]) != NULL) {
@@ -287,9 +289,9 @@
 
 static int route4_delete(struct tcf_proto *tp, unsigned long arg)
 {
-	struct route4_head *head = (struct route4_head*)tp->root;
-	struct route4_filter **fp, *f = (struct route4_filter*)arg;
-	unsigned h = 0;
+	struct route4_head *head = (struct route4_head *)tp->root;
+	struct route4_filter **fp, *f = (struct route4_filter *)arg;
+	unsigned int h = 0;
 	struct route4_bucket *b;
 	int i;
 
@@ -299,7 +301,7 @@
 	h = f->handle;
 	b = f->bkt;
 
-	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
+	for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
 		if (*fp == f) {
 			tcf_tree_lock(tp);
 			*fp = f->next;
@@ -310,7 +312,7 @@
 
 			/* Strip tree */
 
-			for (i=0; i<=32; i++)
+			for (i = 0; i <= 32; i++)
 				if (b->ht[i])
 					return 0;
 
@@ -380,7 +382,8 @@
 	}
 
 	h1 = to_hash(nhandle);
-	if ((b = head->table[h1]) == NULL) {
+	b = head->table[h1];
+	if (!b) {
 		err = -ENOBUFS;
 		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
 		if (b == NULL)
@@ -391,6 +394,7 @@
 		tcf_tree_unlock(tp);
 	} else {
 		unsigned int h2 = from_hash(nhandle >> 16);
+
 		err = -EEXIST;
 		for (fp = b->ht[h2]; fp; fp = fp->next)
 			if (fp->handle == f->handle)
@@ -444,7 +448,8 @@
 	if (err < 0)
 		return err;
 
-	if ((f = (struct route4_filter*)*arg) != NULL) {
+	f = (struct route4_filter *)*arg;
+	if (f) {
 		if (f->handle != handle && handle)
 			return -EINVAL;
 
@@ -481,7 +486,7 @@
 
 reinsert:
 	h = from_hash(f->handle >> 16);
-	for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
+	for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
 		if (f->handle < f1->handle)
 			break;
 
@@ -492,7 +497,8 @@
 	if (old_handle && f->handle != old_handle) {
 		th = to_hash(old_handle);
 		h = from_hash(old_handle >> 16);
-		if ((b = head->table[th]) != NULL) {
+		b = head->table[th];
+		if (b) {
 			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
 				if (*fp == f) {
 					*fp = f->next;
@@ -515,7 +521,7 @@
 static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
 	struct route4_head *head = tp->root;
-	unsigned h, h1;
+	unsigned int h, h1;
 
 	if (head == NULL)
 		arg->stop = 1;
@@ -549,7 +555,7 @@
 static int route4_dump(struct tcf_proto *tp, unsigned long fh,
 		       struct sk_buff *skb, struct tcmsg *t)
 {
-	struct route4_filter *f = (struct route4_filter*)fh;
+	struct route4_filter *f = (struct route4_filter *)fh;
 	unsigned char *b = skb_tail_pointer(skb);
 	struct nlattr *nest;
 	u32 id;
@@ -563,15 +569,15 @@
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	if (!(f->handle&0x8000)) {
-		id = f->id&0xFF;
+	if (!(f->handle & 0x8000)) {
+		id = f->id & 0xFF;
 		NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
 	}
-	if (f->handle&0x80000000) {
-		if ((f->handle>>16) != 0xFFFF)
+	if (f->handle & 0x80000000) {
+		if ((f->handle >> 16) != 0xFFFF)
 			NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
 	} else {
-		id = f->id>>16;
+		id = f->id >> 16;
 		NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
 	}
 	if (f->res.classid)
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 425a179..402c44b 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -66,28 +66,25 @@
    powerful classification engine.  */
 
 
-struct rsvp_head
-{
+struct rsvp_head {
 	u32			tmap[256/32];
 	u32			hgenerator;
 	u8			tgenerator;
 	struct rsvp_session	*ht[256];
 };
 
-struct rsvp_session
-{
+struct rsvp_session {
 	struct rsvp_session	*next;
 	__be32			dst[RSVP_DST_LEN];
 	struct tc_rsvp_gpi 	dpi;
 	u8			protocol;
 	u8			tunnelid;
 	/* 16 (src,sport) hash slots, and one wildcard source slot */
-	struct rsvp_filter	*ht[16+1];
+	struct rsvp_filter	*ht[16 + 1];
 };
 
 
-struct rsvp_filter
-{
+struct rsvp_filter {
 	struct rsvp_filter	*next;
 	__be32			src[RSVP_DST_LEN];
 	struct tc_rsvp_gpi	spi;
@@ -100,17 +97,19 @@
 	struct rsvp_session	*sess;
 };
 
-static __inline__ unsigned hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
+static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
 {
-	unsigned h = (__force __u32)dst[RSVP_DST_LEN-1];
+	unsigned int h = (__force __u32)dst[RSVP_DST_LEN - 1];
+
 	h ^= h>>16;
 	h ^= h>>8;
 	return (h ^ protocol ^ tunnelid) & 0xFF;
 }
 
-static __inline__ unsigned hash_src(__be32 *src)
+static inline unsigned int hash_src(__be32 *src)
 {
-	unsigned h = (__force __u32)src[RSVP_DST_LEN-1];
+	unsigned int h = (__force __u32)src[RSVP_DST_LEN-1];
+
 	h ^= h>>16;
 	h ^= h>>8;
 	h ^= h>>4;
@@ -134,10 +133,10 @@
 static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp,
 			 struct tcf_result *res)
 {
-	struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht;
+	struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
 	struct rsvp_session *s;
 	struct rsvp_filter *f;
-	unsigned h1, h2;
+	unsigned int h1, h2;
 	__be32 *dst, *src;
 	u8 protocol;
 	u8 tunnelid = 0;
@@ -162,13 +161,13 @@
 	src = &nhptr->saddr.s6_addr32[0];
 	dst = &nhptr->daddr.s6_addr32[0];
 	protocol = nhptr->nexthdr;
-	xprt = ((u8*)nhptr) + sizeof(struct ipv6hdr);
+	xprt = ((u8 *)nhptr) + sizeof(struct ipv6hdr);
 #else
 	src = &nhptr->saddr;
 	dst = &nhptr->daddr;
 	protocol = nhptr->protocol;
-	xprt = ((u8*)nhptr) + (nhptr->ihl<<2);
-	if (nhptr->frag_off & htons(IP_MF|IP_OFFSET))
+	xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
+	if (nhptr->frag_off & htons(IP_MF | IP_OFFSET))
 		return -1;
 #endif
 
@@ -176,10 +175,10 @@
 	h2 = hash_src(src);
 
 	for (s = sht[h1]; s; s = s->next) {
-		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
+		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
 		    protocol == s->protocol &&
 		    !(s->dpi.mask &
-		      (*(u32*)(xprt+s->dpi.offset)^s->dpi.key)) &&
+		      (*(u32 *)(xprt + s->dpi.offset) ^ s->dpi.key)) &&
 #if RSVP_DST_LEN == 4
 		    dst[0] == s->dst[0] &&
 		    dst[1] == s->dst[1] &&
@@ -188,8 +187,8 @@
 		    tunnelid == s->tunnelid) {
 
 			for (f = s->ht[h2]; f; f = f->next) {
-				if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN-1] &&
-				    !(f->spi.mask & (*(u32*)(xprt+f->spi.offset)^f->spi.key))
+				if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
+				    !(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
 #if RSVP_DST_LEN == 4
 				    &&
 				    src[0] == f->src[0] &&
@@ -205,7 +204,7 @@
 						return 0;
 
 					tunnelid = f->res.classid;
-					nhptr = (void*)(xprt + f->tunnelhdr - sizeof(*nhptr));
+					nhptr = (void *)(xprt + f->tunnelhdr - sizeof(*nhptr));
 					goto restart;
 				}
 			}
@@ -224,11 +223,11 @@
 
 static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
 {
-	struct rsvp_session **sht = ((struct rsvp_head*)tp->root)->ht;
+	struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
 	struct rsvp_session *s;
 	struct rsvp_filter *f;
-	unsigned h1 = handle&0xFF;
-	unsigned h2 = (handle>>8)&0xFF;
+	unsigned int h1 = handle & 0xFF;
+	unsigned int h2 = (handle >> 8) & 0xFF;
 
 	if (h2 > 16)
 		return 0;
@@ -258,7 +257,7 @@
 	return -ENOBUFS;
 }
 
-static inline void
+static void
 rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
 {
 	tcf_unbind_filter(tp, &f->res);
@@ -277,13 +276,13 @@
 
 	sht = data->ht;
 
-	for (h1=0; h1<256; h1++) {
+	for (h1 = 0; h1 < 256; h1++) {
 		struct rsvp_session *s;
 
 		while ((s = sht[h1]) != NULL) {
 			sht[h1] = s->next;
 
-			for (h2=0; h2<=16; h2++) {
+			for (h2 = 0; h2 <= 16; h2++) {
 				struct rsvp_filter *f;
 
 				while ((f = s->ht[h2]) != NULL) {
@@ -299,13 +298,13 @@
 
 static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
 {
-	struct rsvp_filter **fp, *f = (struct rsvp_filter*)arg;
-	unsigned h = f->handle;
+	struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg;
+	unsigned int h = f->handle;
 	struct rsvp_session **sp;
 	struct rsvp_session *s = f->sess;
 	int i;
 
-	for (fp = &s->ht[(h>>8)&0xFF]; *fp; fp = &(*fp)->next) {
+	for (fp = &s->ht[(h >> 8) & 0xFF]; *fp; fp = &(*fp)->next) {
 		if (*fp == f) {
 			tcf_tree_lock(tp);
 			*fp = f->next;
@@ -314,12 +313,12 @@
 
 			/* Strip tree */
 
-			for (i=0; i<=16; i++)
+			for (i = 0; i <= 16; i++)
 				if (s->ht[i])
 					return 0;
 
 			/* OK, session has no flows */
-			for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF];
+			for (sp = &((struct rsvp_head *)tp->root)->ht[h & 0xFF];
 			     *sp; sp = &(*sp)->next) {
 				if (*sp == s) {
 					tcf_tree_lock(tp);
@@ -337,13 +336,14 @@
 	return 0;
 }
 
-static unsigned gen_handle(struct tcf_proto *tp, unsigned salt)
+static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
 {
 	struct rsvp_head *data = tp->root;
 	int i = 0xFFFF;
 
 	while (i-- > 0) {
 		u32 h;
+
 		if ((data->hgenerator += 0x10000) == 0)
 			data->hgenerator = 0x10000;
 		h = data->hgenerator|salt;
@@ -355,10 +355,10 @@
 
 static int tunnel_bts(struct rsvp_head *data)
 {
-	int n = data->tgenerator>>5;
-	u32 b = 1<<(data->tgenerator&0x1F);
+	int n = data->tgenerator >> 5;
+	u32 b = 1 << (data->tgenerator & 0x1F);
 
-	if (data->tmap[n]&b)
+	if (data->tmap[n] & b)
 		return 0;
 	data->tmap[n] |= b;
 	return 1;
@@ -372,10 +372,10 @@
 
 	memset(tmap, 0, sizeof(tmap));
 
-	for (h1=0; h1<256; h1++) {
+	for (h1 = 0; h1 < 256; h1++) {
 		struct rsvp_session *s;
 		for (s = sht[h1]; s; s = s->next) {
-			for (h2=0; h2<=16; h2++) {
+			for (h2 = 0; h2 <= 16; h2++) {
 				struct rsvp_filter *f;
 
 				for (f = s->ht[h2]; f; f = f->next) {
@@ -395,8 +395,8 @@
 {
 	int i, k;
 
-	for (k=0; k<2; k++) {
-		for (i=255; i>0; i--) {
+	for (k = 0; k < 2; k++) {
+		for (i = 255; i > 0; i--) {
 			if (++data->tgenerator == 0)
 				data->tgenerator = 1;
 			if (tunnel_bts(data))
@@ -428,7 +428,7 @@
 	struct nlattr *opt = tca[TCA_OPTIONS-1];
 	struct nlattr *tb[TCA_RSVP_MAX + 1];
 	struct tcf_exts e;
-	unsigned h1, h2;
+	unsigned int h1, h2;
 	__be32 *dst;
 	int err;
 
@@ -443,7 +443,8 @@
 	if (err < 0)
 		return err;
 
-	if ((f = (struct rsvp_filter*)*arg) != NULL) {
+	f = (struct rsvp_filter *)*arg;
+	if (f) {
 		/* Node exists: adjust only classid */
 
 		if (f->handle != handle && handle)
@@ -500,7 +501,7 @@
 			goto errout;
 	}
 
-	for (sp = &data->ht[h1]; (s=*sp) != NULL; sp = &s->next) {
+	for (sp = &data->ht[h1]; (s = *sp) != NULL; sp = &s->next) {
 		if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
 		    pinfo && pinfo->protocol == s->protocol &&
 		    memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
@@ -523,7 +524,7 @@
 			tcf_exts_change(tp, &f->exts, &e);
 
 			for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next)
-				if (((*fp)->spi.mask&f->spi.mask) != f->spi.mask)
+				if (((*fp)->spi.mask & f->spi.mask) != f->spi.mask)
 					break;
 			f->next = *fp;
 			wmb();
@@ -567,7 +568,7 @@
 static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
 	struct rsvp_head *head = tp->root;
-	unsigned h, h1;
+	unsigned int h, h1;
 
 	if (arg->stop)
 		return;
@@ -598,7 +599,7 @@
 static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
 		     struct sk_buff *skb, struct tcmsg *t)
 {
-	struct rsvp_filter *f = (struct rsvp_filter*)fh;
+	struct rsvp_filter *f = (struct rsvp_filter *)fh;
 	struct rsvp_session *s;
 	unsigned char *b = skb_tail_pointer(skb);
 	struct nlattr *nest;
@@ -624,7 +625,7 @@
 	NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo);
 	if (f->res.classid)
 		NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid);
-	if (((f->handle>>8)&0xFF) != 16)
+	if (((f->handle >> 8) & 0xFF) != 16)
 		NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src);
 
 	if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 20ef330..36667fa 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -249,7 +249,7 @@
 		 * of the hashing index is below the threshold.
 		 */
 		if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD)
-			cp.hash = (cp.mask >> cp.shift)+1;
+			cp.hash = (cp.mask >> cp.shift) + 1;
 		else
 			cp.hash = DEFAULT_HASH_SIZE;
 	}
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index b0c2a82..966920c 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -42,8 +42,7 @@
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
 
-struct tc_u_knode
-{
+struct tc_u_knode {
 	struct tc_u_knode	*next;
 	u32			handle;
 	struct tc_u_hnode	*ht_up;
@@ -63,19 +62,17 @@
 	struct tc_u32_sel	sel;
 };
 
-struct tc_u_hnode
-{
+struct tc_u_hnode {
 	struct tc_u_hnode	*next;
 	u32			handle;
 	u32			prio;
 	struct tc_u_common	*tp_c;
 	int			refcnt;
-	unsigned		divisor;
+	unsigned int		divisor;
 	struct tc_u_knode	*ht[1];
 };
 
-struct tc_u_common
-{
+struct tc_u_common {
 	struct tc_u_hnode	*hlist;
 	struct Qdisc		*q;
 	int			refcnt;
@@ -87,9 +84,11 @@
 	.police = TCA_U32_POLICE
 };
 
-static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift)
+static inline unsigned int u32_hash_fold(__be32 key,
+					 const struct tc_u32_sel *sel,
+					 u8 fshift)
 {
-	unsigned h = ntohl(key & sel->hmask)>>fshift;
+	unsigned int h = ntohl(key & sel->hmask) >> fshift;
 
 	return h;
 }
@@ -101,7 +100,7 @@
 		unsigned int	  off;
 	} stack[TC_U32_MAXDEPTH];
 
-	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
+	struct tc_u_hnode *ht = (struct tc_u_hnode *)tp->root;
 	unsigned int off = skb_network_offset(skb);
 	struct tc_u_knode *n;
 	int sdepth = 0;
@@ -120,7 +119,7 @@
 		struct tc_u32_key *key = n->sel.keys;
 
 #ifdef CONFIG_CLS_U32_PERF
-		n->pf->rcnt +=1;
+		n->pf->rcnt += 1;
 		j = 0;
 #endif
 
@@ -133,7 +132,7 @@
 		}
 #endif
 
-		for (i = n->sel.nkeys; i>0; i--, key++) {
+		for (i = n->sel.nkeys; i > 0; i--, key++) {
 			int toff = off + key->off + (off2 & key->offmask);
 			__be32 *data, _data;
 
@@ -148,13 +147,13 @@
 				goto next_knode;
 			}
 #ifdef CONFIG_CLS_U32_PERF
-			n->pf->kcnts[j] +=1;
+			n->pf->kcnts[j] += 1;
 			j++;
 #endif
 		}
 		if (n->ht_down == NULL) {
 check_terminal:
-			if (n->sel.flags&TC_U32_TERMINAL) {
+			if (n->sel.flags & TC_U32_TERMINAL) {
 
 				*res = n->res;
 #ifdef CONFIG_NET_CLS_IND
@@ -164,7 +163,7 @@
 				}
 #endif
 #ifdef CONFIG_CLS_U32_PERF
-				n->pf->rhit +=1;
+				n->pf->rhit += 1;
 #endif
 				r = tcf_exts_exec(skb, &n->exts, res);
 				if (r < 0) {
@@ -197,10 +196,10 @@
 			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
 							  n->fshift);
 		}
-		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
+		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
 			goto next_ht;
 
-		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
+		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
 			off2 = n->sel.off + 3;
 			if (n->sel.flags & TC_U32_VAROFFSET) {
 				__be16 *data, _data;
@@ -215,7 +214,7 @@
 			}
 			off2 &= ~3;
 		}
-		if (n->sel.flags&TC_U32_EAT) {
+		if (n->sel.flags & TC_U32_EAT) {
 			off += off2;
 			off2 = 0;
 		}
@@ -236,11 +235,11 @@
 
 deadloop:
 	if (net_ratelimit())
-		printk(KERN_WARNING "cls_u32: dead loop\n");
+		pr_warning("cls_u32: dead loop\n");
 	return -1;
 }
 
-static __inline__ struct tc_u_hnode *
+static struct tc_u_hnode *
 u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
 {
 	struct tc_u_hnode *ht;
@@ -252,10 +251,10 @@
 	return ht;
 }
 
-static __inline__ struct tc_u_knode *
+static struct tc_u_knode *
 u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
 {
-	unsigned sel;
+	unsigned int sel;
 	struct tc_u_knode *n = NULL;
 
 	sel = TC_U32_HASH(handle);
@@ -300,7 +299,7 @@
 	do {
 		if (++tp_c->hgenerator == 0x7FF)
 			tp_c->hgenerator = 1;
-	} while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
+	} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
 
 	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
 }
@@ -378,9 +377,9 @@
 static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
 {
 	struct tc_u_knode *n;
-	unsigned h;
+	unsigned int h;
 
-	for (h=0; h<=ht->divisor; h++) {
+	for (h = 0; h <= ht->divisor; h++) {
 		while ((n = ht->ht[h]) != NULL) {
 			ht->ht[h] = n->next;
 
@@ -446,13 +445,13 @@
 
 static int u32_delete(struct tcf_proto *tp, unsigned long arg)
 {
-	struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;
+	struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;
 
 	if (ht == NULL)
 		return 0;
 
 	if (TC_U32_KEY(ht->handle))
-		return u32_delete_key(tp, (struct tc_u_knode*)ht);
+		return u32_delete_key(tp, (struct tc_u_knode *)ht);
 
 	if (tp->root == ht)
 		return -EINVAL;
@@ -470,14 +469,14 @@
 static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
 {
 	struct tc_u_knode *n;
-	unsigned i = 0x7FF;
+	unsigned int i = 0x7FF;
 
-	for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
+	for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
 		if (i < TC_U32_NODE(n->handle))
 			i = TC_U32_NODE(n->handle);
 	i++;
 
-	return handle|(i>0xFFF ? 0xFFF : i);
+	return handle | (i > 0xFFF ? 0xFFF : i);
 }
 
 static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
@@ -566,7 +565,8 @@
 	if (err < 0)
 		return err;
 
-	if ((n = (struct tc_u_knode*)*arg) != NULL) {
+	n = (struct tc_u_knode *)*arg;
+	if (n) {
 		if (TC_U32_KEY(n->handle) == 0)
 			return -EINVAL;
 
@@ -574,7 +574,7 @@
 	}
 
 	if (tb[TCA_U32_DIVISOR]) {
-		unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
+		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
 
 		if (--divisor > 0x100)
 			return -EINVAL;
@@ -585,7 +585,7 @@
 			if (handle == 0)
 				return -ENOMEM;
 		}
-		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
+		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
 		if (ht == NULL)
 			return -ENOBUFS;
 		ht->tp_c = tp_c;
@@ -683,7 +683,7 @@
 	struct tc_u_common *tp_c = tp->data;
 	struct tc_u_hnode *ht;
 	struct tc_u_knode *n;
-	unsigned h;
+	unsigned int h;
 
 	if (arg->stop)
 		return;
@@ -717,7 +717,7 @@
 static int u32_dump(struct tcf_proto *tp, unsigned long fh,
 		     struct sk_buff *skb, struct tcmsg *t)
 {
-	struct tc_u_knode *n = (struct tc_u_knode*)fh;
+	struct tc_u_knode *n = (struct tc_u_knode *)fh;
 	struct nlattr *nest;
 
 	if (n == NULL)
@@ -730,8 +730,9 @@
 		goto nla_put_failure;
 
 	if (TC_U32_KEY(n->handle) == 0) {
-		struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
-		u32 divisor = ht->divisor+1;
+		struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
+		u32 divisor = ht->divisor + 1;
+
 		NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
 	} else {
 		NLA_PUT(skb, TCA_U32_SEL,
@@ -755,7 +756,7 @@
 			goto nla_put_failure;
 
 #ifdef CONFIG_NET_CLS_IND
-		if(strlen(n->indev))
+		if (strlen(n->indev))
 			NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev);
 #endif
 #ifdef CONFIG_CLS_U32_PERF
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index bc45039..1c8360a 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -33,40 +33,41 @@
 		return 0;
 
 	switch (cmp->align) {
-		case TCF_EM_ALIGN_U8:
-			val = *ptr;
-			break;
+	case TCF_EM_ALIGN_U8:
+		val = *ptr;
+		break;
 
-		case TCF_EM_ALIGN_U16:
-			val = get_unaligned_be16(ptr);
+	case TCF_EM_ALIGN_U16:
+		val = get_unaligned_be16(ptr);
 
-			if (cmp_needs_transformation(cmp))
-				val = be16_to_cpu(val);
-			break;
+		if (cmp_needs_transformation(cmp))
+			val = be16_to_cpu(val);
+		break;
 
-		case TCF_EM_ALIGN_U32:
-			/* Worth checking boundries? The branching seems
-			 * to get worse. Visit again. */
-			val = get_unaligned_be32(ptr);
+	case TCF_EM_ALIGN_U32:
+		/* Worth checking boundaries? The branching seems
+		 * to get worse. Visit again.
+		 */
+		val = get_unaligned_be32(ptr);
 
-			if (cmp_needs_transformation(cmp))
-				val = be32_to_cpu(val);
-			break;
+		if (cmp_needs_transformation(cmp))
+			val = be32_to_cpu(val);
+		break;
 
-		default:
-			return 0;
+	default:
+		return 0;
 	}
 
 	if (cmp->mask)
 		val &= cmp->mask;
 
 	switch (cmp->opnd) {
-		case TCF_EM_OPND_EQ:
-			return val == cmp->val;
-		case TCF_EM_OPND_LT:
-			return val < cmp->val;
-		case TCF_EM_OPND_GT:
-			return val > cmp->val;
+	case TCF_EM_OPND_EQ:
+		return val == cmp->val;
+	case TCF_EM_OPND_LT:
+		return val < cmp->val;
+	case TCF_EM_OPND_GT:
+		return val > cmp->val;
 	}
 
 	return 0;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 34da5e2..a889d09 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -73,21 +73,18 @@
 #include <net/pkt_cls.h>
 #include <net/sock.h>
 
-struct meta_obj
-{
+struct meta_obj {
 	unsigned long		value;
 	unsigned int		len;
 };
 
-struct meta_value
-{
+struct meta_value {
 	struct tcf_meta_val	hdr;
 	unsigned long		val;
 	unsigned int		len;
 };
 
-struct meta_match
-{
+struct meta_match {
 	struct meta_value	lvalue;
 	struct meta_value	rvalue;
 };
@@ -255,7 +252,7 @@
 	if (unlikely(skb_dst(skb) == NULL))
 		*err = -1;
 	else
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 		dst->value = skb_dst(skb)->tclassid;
 #else
 		dst->value = 0;
@@ -483,8 +480,7 @@
  * Meta value collectors assignment table
  **************************************************************************/
 
-struct meta_ops
-{
+struct meta_ops {
 	void		(*get)(struct sk_buff *, struct tcf_pkt_info *,
 			       struct meta_value *, struct meta_obj *, int *);
 };
@@ -494,7 +490,7 @@
 
 /* Meta value operations table listing all meta value collectors and
  * assigns them to a type and meta id. */
-static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
+static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
 	[TCF_META_TYPE_VAR] = {
 		[META_ID(DEV)]			= META_FUNC(var_dev),
 		[META_ID(SK_BOUND_IF)] 		= META_FUNC(var_sk_bound_if),
@@ -550,7 +546,7 @@
 	}
 };
 
-static inline struct meta_ops * meta_ops(struct meta_value *val)
+static inline struct meta_ops *meta_ops(struct meta_value *val)
 {
 	return &__meta_ops[meta_type(val)][meta_id(val)];
 }
@@ -649,9 +645,8 @@
 {
 	if (v->len == sizeof(unsigned long))
 		NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val);
-	else if (v->len == sizeof(u32)) {
+	else if (v->len == sizeof(u32))
 		NLA_PUT_U32(skb, tlv, v->val);
-	}
 
 	return 0;
 
@@ -663,8 +658,7 @@
  * Type specific operations table
  **************************************************************************/
 
-struct meta_type_ops
-{
+struct meta_type_ops {
 	void	(*destroy)(struct meta_value *);
 	int	(*compare)(struct meta_obj *, struct meta_obj *);
 	int	(*change)(struct meta_value *, struct nlattr *);
@@ -672,7 +666,7 @@
 	int	(*dump)(struct sk_buff *, struct meta_value *, int);
 };
 
-static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX+1] = {
+static struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
 	[TCF_META_TYPE_VAR] = {
 		.destroy = meta_var_destroy,
 		.compare = meta_var_compare,
@@ -688,7 +682,7 @@
 	}
 };
 
-static inline struct meta_type_ops * meta_type_ops(struct meta_value *v)
+static inline struct meta_type_ops *meta_type_ops(struct meta_value *v)
 {
 	return &__meta_type_ops[meta_type(v)];
 }
@@ -713,7 +707,7 @@
 		return err;
 
 	if (meta_type_ops(v)->apply_extras)
-	    meta_type_ops(v)->apply_extras(v, dst);
+		meta_type_ops(v)->apply_extras(v, dst);
 
 	return 0;
 }
@@ -732,12 +726,12 @@
 	r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);
 
 	switch (meta->lvalue.hdr.op) {
-		case TCF_EM_OPND_EQ:
-			return !r;
-		case TCF_EM_OPND_LT:
-			return r < 0;
-		case TCF_EM_OPND_GT:
-			return r > 0;
+	case TCF_EM_OPND_EQ:
+		return !r;
+	case TCF_EM_OPND_LT:
+		return r < 0;
+	case TCF_EM_OPND_GT:
+		return r > 0;
 	}
 
 	return 0;
@@ -771,7 +765,7 @@
 
 static inline int meta_is_supported(struct meta_value *val)
 {
-	return (!meta_id(val) || meta_ops(val)->get);
+	return !meta_id(val) || meta_ops(val)->get;
 }
 
 static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index 1a4176a..a3bed07 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -18,8 +18,7 @@
 #include <linux/tc_ematch/tc_em_nbyte.h>
 #include <net/pkt_cls.h>
 
-struct nbyte_data
-{
+struct nbyte_data {
 	struct tcf_em_nbyte	hdr;
 	char			pattern[0];
 };
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index ea8f566..15d353d 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -19,8 +19,7 @@
 #include <linux/tc_ematch/tc_em_text.h>
 #include <net/pkt_cls.h>
 
-struct text_match
-{
+struct text_match {
 	u16			from_offset;
 	u16			to_offset;
 	u8			from_layer;
diff --git a/net/sched/em_u32.c b/net/sched/em_u32.c
index 953f147..797bdb8 100644
--- a/net/sched/em_u32.c
+++ b/net/sched/em_u32.c
@@ -35,7 +35,7 @@
 	if (!tcf_valid_offset(skb, ptr, sizeof(u32)))
 		return 0;
 
-	return !(((*(__be32*) ptr)  ^ key->val) & key->mask);
+	return !(((*(__be32 *) ptr)  ^ key->val) & key->mask);
 }
 
 static struct tcf_ematch_ops em_u32_ops = {
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 5e37da9..88d93eb 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -93,7 +93,7 @@
 static LIST_HEAD(ematch_ops);
 static DEFINE_RWLOCK(ematch_mod_lock);
 
-static inline struct tcf_ematch_ops * tcf_em_lookup(u16 kind)
+static struct tcf_ematch_ops *tcf_em_lookup(u16 kind)
 {
 	struct tcf_ematch_ops *e = NULL;
 
@@ -163,8 +163,8 @@
 }
 EXPORT_SYMBOL(tcf_em_unregister);
 
-static inline struct tcf_ematch * tcf_em_get_match(struct tcf_ematch_tree *tree,
-						   int index)
+static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree,
+						  int index)
 {
 	return &tree->matches[index];
 }
@@ -184,7 +184,8 @@
 
 	if (em_hdr->kind == TCF_EM_CONTAINER) {
 		/* Special ematch called "container", carries an index
-		 * referencing an external ematch sequence. */
+		 * referencing an external ematch sequence.
+		 */
 		u32 ref;
 
 		if (data_len < sizeof(ref))
@@ -195,7 +196,8 @@
 			goto errout;
 
 		/* We do not allow backward jumps to avoid loops and jumps
-		 * to our own position are of course illegal. */
+		 * to our own position are of course illegal.
+		 */
 		if (ref <= idx)
 			goto errout;
 
@@ -208,7 +210,8 @@
 		 * which automatically releases the reference again, therefore
 		 * the module MUST not be given back under any circumstances
 		 * here. Be aware, the destroy function assumes that the
-		 * module is held if the ops field is non zero. */
+		 * module is held if the ops field is non zero.
+		 */
 		em->ops = tcf_em_lookup(em_hdr->kind);
 
 		if (em->ops == NULL) {
@@ -221,7 +224,8 @@
 			if (em->ops) {
 				/* We dropped the RTNL mutex in order to
 				 * perform the module load. Tell the caller
-				 * to replay the request. */
+				 * to replay the request.
+				 */
 				module_put(em->ops->owner);
 				err = -EAGAIN;
 			}
@@ -230,7 +234,8 @@
 		}
 
 		/* ematch module provides expected length of data, so we
-		 * can do a basic sanity check. */
+		 * can do a basic sanity check.
+		 */
 		if (em->ops->datalen && data_len < em->ops->datalen)
 			goto errout;
 
@@ -246,7 +251,8 @@
 			 * TCF_EM_SIMPLE may be specified stating that the
 			 * data only consists of a u32 integer and the module
 			 * does not expected a memory reference but rather
-			 * the value carried. */
+			 * the value carried.
+			 */
 			if (em_hdr->flags & TCF_EM_SIMPLE) {
 				if (data_len < sizeof(u32))
 					goto errout;
@@ -334,7 +340,8 @@
 	 * The array of rt attributes is parsed in the order as they are
 	 * provided, their type must be incremental from 1 to n. Even
 	 * if it does not serve any real purpose, a failure of sticking
-	 * to this policy will result in parsing failure. */
+	 * to this policy will result in parsing failure.
+	 */
 	for (idx = 0; nla_ok(rt_match, list_len); idx++) {
 		err = -EINVAL;
 
@@ -359,7 +366,8 @@
 	/* Check if the number of matches provided by userspace actually
 	 * complies with the array of matches. The number was used for
 	 * the validation of references and a mismatch could lead to
-	 * undefined references during the matching process. */
+	 * undefined references during the matching process.
+	 */
 	if (idx != tree_hdr->nmatches) {
 		err = -EINVAL;
 		goto errout_abort;
@@ -449,7 +457,7 @@
 			.flags = em->flags
 		};
 
-		NLA_PUT(skb, i+1, sizeof(em_hdr), &em_hdr);
+		NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr);
 
 		if (em->ops && em->ops->dump) {
 			if (em->ops->dump(skb, em) < 0)
@@ -478,6 +486,7 @@
 			       struct tcf_pkt_info *info)
 {
 	int r = em->ops->match(skb, em, info);
+
 	return tcf_em_is_inverted(em) ? !r : r;
 }
 
@@ -527,8 +536,8 @@
 
 stack_overflow:
 	if (net_ratelimit())
-		printk(KERN_WARNING "tc ematch: local stack overflow,"
-			" increase NET_EMATCH_STACK\n");
+		pr_warning("tc ematch: local stack overflow,"
+			   " increase NET_EMATCH_STACK\n");
 	return -1;
 }
 EXPORT_SYMBOL(__tcf_em_tree_match);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b22ca2d..1507415 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -187,7 +187,7 @@
 	int err = -ENOENT;
 
 	write_lock(&qdisc_mod_lock);
-	for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next)
+	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
 		if (q == qops)
 			break;
 	if (q) {
@@ -321,7 +321,9 @@
 	if (!tab || --tab->refcnt)
 		return;
 
-	for (rtabp = &qdisc_rtab_list; (rtab=*rtabp) != NULL; rtabp = &rtab->next) {
+	for (rtabp = &qdisc_rtab_list;
+	     (rtab = *rtabp) != NULL;
+	     rtabp = &rtab->next) {
 		if (rtab == tab) {
 			*rtabp = rtab->next;
 			kfree(rtab);
@@ -396,6 +398,11 @@
 	return stab;
 }
 
+static void stab_kfree_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct qdisc_size_table, rcu));
+}
+
 void qdisc_put_stab(struct qdisc_size_table *tab)
 {
 	if (!tab)
@@ -405,7 +412,7 @@
 
 	if (--tab->refcnt == 0) {
 		list_del(&tab->list);
-		kfree(tab);
+		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
 	}
 
 	spin_unlock(&qdisc_stab_lock);
@@ -428,7 +435,7 @@
 	return -1;
 }
 
-void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
+void __qdisc_calculate_pkt_len(struct sk_buff *skb, const struct qdisc_size_table *stab)
 {
 	int pkt_len, slot;
 
@@ -454,14 +461,13 @@
 		pkt_len = 1;
 	qdisc_skb_cb(skb)->pkt_len = pkt_len;
 }
-EXPORT_SYMBOL(qdisc_calculate_pkt_len);
+EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
 
 void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc)
 {
 	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
-		printk(KERN_WARNING
-		       "%s: %s qdisc %X: is non-work-conserving?\n",
-		       txt, qdisc->ops->id, qdisc->handle >> 16);
+		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
+			txt, qdisc->ops->id, qdisc->handle >> 16);
 		qdisc->flags |= TCQ_F_WARN_NONWC;
 	}
 }
@@ -472,7 +478,7 @@
 	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
 						 timer);
 
-	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+	qdisc_unthrottled(wd->qdisc);
 	__netif_schedule(qdisc_root(wd->qdisc));
 
 	return HRTIMER_NORESTART;
@@ -494,7 +500,7 @@
 		     &qdisc_root_sleeping(wd->qdisc)->state))
 		return;
 
-	wd->qdisc->flags |= TCQ_F_THROTTLED;
+	qdisc_throttled(wd->qdisc);
 	time = ktime_set(0, 0);
 	time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
 	hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
@@ -504,7 +510,7 @@
 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 {
 	hrtimer_cancel(&wd->timer);
-	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
+	qdisc_unthrottled(wd->qdisc);
 }
 EXPORT_SYMBOL(qdisc_watchdog_cancel);
 
@@ -625,7 +631,7 @@
 			autohandle = TC_H_MAKE(0x80000000U, 0);
 	} while	(qdisc_lookup(dev, autohandle) && --i > 0);
 
-	return i>0 ? autohandle : 0;
+	return i > 0 ? autohandle : 0;
 }
 
 void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
@@ -834,7 +840,7 @@
 				err = PTR_ERR(stab);
 				goto err_out4;
 			}
-			sch->stab = stab;
+			rcu_assign_pointer(sch->stab, stab);
 		}
 		if (tca[TCA_RATE]) {
 			spinlock_t *root_lock;
@@ -874,7 +880,7 @@
 	 * Any broken qdiscs that would require a ops->reset() here?
 	 * The qdisc was never in action so it shouldn't be necessary.
 	 */
-	qdisc_put_stab(sch->stab);
+	qdisc_put_stab(rtnl_dereference(sch->stab));
 	if (ops->destroy)
 		ops->destroy(sch);
 	goto err_out3;
@@ -882,7 +888,7 @@
 
 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
 {
-	struct qdisc_size_table *stab = NULL;
+	struct qdisc_size_table *ostab, *stab = NULL;
 	int err = 0;
 
 	if (tca[TCA_OPTIONS]) {
@@ -899,8 +905,9 @@
 			return PTR_ERR(stab);
 	}
 
-	qdisc_put_stab(sch->stab);
-	sch->stab = stab;
+	ostab = rtnl_dereference(sch->stab);
+	rcu_assign_pointer(sch->stab, stab);
+	qdisc_put_stab(ostab);
 
 	if (tca[TCA_RATE]) {
 		/* NB: ignores errors from replace_estimator
@@ -915,9 +922,8 @@
 	return 0;
 }
 
-struct check_loop_arg
-{
-	struct qdisc_walker 	w;
+struct check_loop_arg {
+	struct qdisc_walker	w;
 	struct Qdisc		*p;
 	int			depth;
 };
@@ -970,7 +976,8 @@
 	struct Qdisc *p = NULL;
 	int err;
 
-	if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+	if (!dev)
 		return -ENODEV;
 
 	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -980,12 +987,12 @@
 	if (clid) {
 		if (clid != TC_H_ROOT) {
 			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
-				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
+				p = qdisc_lookup(dev, TC_H_MAJ(clid));
+				if (!p)
 					return -ENOENT;
 				q = qdisc_leaf(p, clid);
-			} else { /* ingress */
-				if (dev_ingress_queue(dev))
-					q = dev_ingress_queue(dev)->qdisc_sleeping;
+			} else if (dev_ingress_queue(dev)) {
+				q = dev_ingress_queue(dev)->qdisc_sleeping;
 			}
 		} else {
 			q = dev->qdisc;
@@ -996,7 +1003,8 @@
 		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
 			return -EINVAL;
 	} else {
-		if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
+		q = qdisc_lookup(dev, tcm->tcm_handle);
+		if (!q)
 			return -ENOENT;
 	}
 
@@ -1008,7 +1016,8 @@
 			return -EINVAL;
 		if (q->handle == 0)
 			return -ENOENT;
-		if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
+		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
+		if (err != 0)
 			return err;
 	} else {
 		qdisc_notify(net, skb, n, clid, NULL, q);
@@ -1017,7 +1026,7 @@
 }
 
 /*
-   Create/change qdisc.
+ * Create/change qdisc.
  */
 
 static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
@@ -1036,7 +1045,8 @@
 	clid = tcm->tcm_parent;
 	q = p = NULL;
 
-	if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+	if (!dev)
 		return -ENODEV;
 
 	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1046,12 +1056,12 @@
 	if (clid) {
 		if (clid != TC_H_ROOT) {
 			if (clid != TC_H_INGRESS) {
-				if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
+				p = qdisc_lookup(dev, TC_H_MAJ(clid));
+				if (!p)
 					return -ENOENT;
 				q = qdisc_leaf(p, clid);
-			} else { /* ingress */
-				if (dev_ingress_queue_create(dev))
-					q = dev_ingress_queue(dev)->qdisc_sleeping;
+			} else if (dev_ingress_queue_create(dev)) {
+				q = dev_ingress_queue(dev)->qdisc_sleeping;
 			}
 		} else {
 			q = dev->qdisc;
@@ -1063,13 +1073,14 @@
 
 		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
 			if (tcm->tcm_handle) {
-				if (q && !(n->nlmsg_flags&NLM_F_REPLACE))
+				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
 					return -EEXIST;
 				if (TC_H_MIN(tcm->tcm_handle))
 					return -EINVAL;
-				if ((q = qdisc_lookup(dev, tcm->tcm_handle)) == NULL)
+				q = qdisc_lookup(dev, tcm->tcm_handle);
+				if (!q)
 					goto create_n_graft;
-				if (n->nlmsg_flags&NLM_F_EXCL)
+				if (n->nlmsg_flags & NLM_F_EXCL)
 					return -EEXIST;
 				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
 					return -EINVAL;
@@ -1079,7 +1090,7 @@
 				atomic_inc(&q->refcnt);
 				goto graft;
 			} else {
-				if (q == NULL)
+				if (!q)
 					goto create_n_graft;
 
 				/* This magic test requires explanation.
@@ -1101,9 +1112,9 @@
 				 *   For now we select create/graft, if
 				 *   user gave KIND, which does not match existing.
 				 */
-				if ((n->nlmsg_flags&NLM_F_CREATE) &&
-				    (n->nlmsg_flags&NLM_F_REPLACE) &&
-				    ((n->nlmsg_flags&NLM_F_EXCL) ||
+				if ((n->nlmsg_flags & NLM_F_CREATE) &&
+				    (n->nlmsg_flags & NLM_F_REPLACE) &&
+				    ((n->nlmsg_flags & NLM_F_EXCL) ||
 				     (tca[TCA_KIND] &&
 				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
 					goto create_n_graft;
@@ -1118,7 +1129,7 @@
 	/* Change qdisc parameters */
 	if (q == NULL)
 		return -ENOENT;
-	if (n->nlmsg_flags&NLM_F_EXCL)
+	if (n->nlmsg_flags & NLM_F_EXCL)
 		return -EEXIST;
 	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
 		return -EINVAL;
@@ -1128,7 +1139,7 @@
 	return err;
 
 create_n_graft:
-	if (!(n->nlmsg_flags&NLM_F_CREATE))
+	if (!(n->nlmsg_flags & NLM_F_CREATE))
 		return -ENOENT;
 	if (clid == TC_H_INGRESS) {
 		if (dev_ingress_queue(dev))
@@ -1175,6 +1186,7 @@
 	struct nlmsghdr  *nlh;
 	unsigned char *b = skb_tail_pointer(skb);
 	struct gnet_dump d;
+	struct qdisc_size_table *stab;
 
 	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
 	tcm = NLMSG_DATA(nlh);
@@ -1190,7 +1202,8 @@
 		goto nla_put_failure;
 	q->qstats.qlen = q->q.qlen;
 
-	if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
+	stab = rtnl_dereference(q->stab);
+	if (stab && qdisc_dump_stab(skb, stab) < 0)
 		goto nla_put_failure;
 
 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
@@ -1234,16 +1247,19 @@
 		return -ENOBUFS;
 
 	if (old && !tc_qdisc_dump_ignore(old)) {
-		if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq, 0, RTM_DELQDISC) < 0)
+		if (tc_fill_qdisc(skb, old, clid, pid, n->nlmsg_seq,
+				  0, RTM_DELQDISC) < 0)
 			goto err_out;
 	}
 	if (new && !tc_qdisc_dump_ignore(new)) {
-		if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq, old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
+		if (tc_fill_qdisc(skb, new, clid, pid, n->nlmsg_seq,
+				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
 			goto err_out;
 	}
 
 	if (skb->len)
-		return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+		return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+				      n->nlmsg_flags & NLM_F_ECHO);
 
 err_out:
 	kfree_skb(skb);
@@ -1275,7 +1291,7 @@
 			q_idx++;
 			continue;
 		}
-		if (!tc_qdisc_dump_ignore(q) && 
+		if (!tc_qdisc_dump_ignore(q) &&
 		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
 				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
 			goto done;
@@ -1356,7 +1372,8 @@
 	u32 qid = TC_H_MAJ(clid);
 	int err;
 
-	if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+	if (!dev)
 		return -ENODEV;
 
 	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1391,9 +1408,9 @@
 			qid = dev->qdisc->handle;
 
 		/* Now qid is genuine qdisc handle consistent
-		   both with parent and child.
-
-		   TC_H_MAJ(pid) still may be unspecified, complete it now.
+		 * both with parent and child.
+		 *
+		 * TC_H_MAJ(pid) still may be unspecified, complete it now.
 		 */
 		if (pid)
 			pid = TC_H_MAKE(qid, pid);
@@ -1403,7 +1420,8 @@
 	}
 
 	/* OK. Locate qdisc */
-	if ((q = qdisc_lookup(dev, qid)) == NULL)
+	q = qdisc_lookup(dev, qid);
+	if (!q)
 		return -ENOENT;
 
 	/* An check that it supports classes */
@@ -1423,13 +1441,14 @@
 
 	if (cl == 0) {
 		err = -ENOENT;
-		if (n->nlmsg_type != RTM_NEWTCLASS || !(n->nlmsg_flags&NLM_F_CREATE))
+		if (n->nlmsg_type != RTM_NEWTCLASS ||
+		    !(n->nlmsg_flags & NLM_F_CREATE))
 			goto out;
 	} else {
 		switch (n->nlmsg_type) {
 		case RTM_NEWTCLASS:
 			err = -EEXIST;
-			if (n->nlmsg_flags&NLM_F_EXCL)
+			if (n->nlmsg_flags & NLM_F_EXCL)
 				goto out;
 			break;
 		case RTM_DELTCLASS:
@@ -1521,14 +1540,14 @@
 		return -EINVAL;
 	}
 
-	return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+	return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
+			      n->nlmsg_flags & NLM_F_ECHO);
 }
 
-struct qdisc_dump_args
-{
-	struct qdisc_walker w;
-	struct sk_buff *skb;
-	struct netlink_callback *cb;
+struct qdisc_dump_args {
+	struct qdisc_walker	w;
+	struct sk_buff		*skb;
+	struct netlink_callback	*cb;
 };
 
 static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walker *arg)
@@ -1590,7 +1609,7 @@
 
 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 {
-	struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
+	struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
 	struct net *net = sock_net(skb->sk);
 	struct netdev_queue *dev_queue;
 	struct net_device *dev;
@@ -1598,7 +1617,8 @@
 
 	if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
 		return 0;
-	if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
+	dev = dev_get_by_index(net, tcm->tcm_ifindex);
+	if (!dev)
 		return 0;
 
 	s_t = cb->args[0];
@@ -1621,19 +1641,22 @@
 }
 
 /* Main classifier routine: scans classifier chain attached
-   to this qdisc, (optionally) tests for protocol and asks
-   specific classifiers.
+ * to this qdisc, (optionally) tests for protocol and asks
+ * specific classifiers.
  */
 int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
 		       struct tcf_result *res)
 {
 	__be16 protocol = skb->protocol;
-	int err = 0;
+	int err;
 
 	for (; tp; tp = tp->next) {
-		if ((tp->protocol == protocol ||
-		     tp->protocol == htons(ETH_P_ALL)) &&
-		    (err = tp->classify(skb, tp, res)) >= 0) {
+		if (tp->protocol != protocol &&
+		    tp->protocol != htons(ETH_P_ALL))
+			continue;
+		err = tp->classify(skb, tp, res);
+
+		if (err >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 			if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
 				skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
@@ -1664,11 +1687,11 @@
 
 		if (verd++ >= MAX_REC_LOOP) {
 			if (net_ratelimit())
-				printk(KERN_NOTICE
-				       "%s: packet reclassify loop"
+				pr_notice("%s: packet reclassify loop"
 					  " rule prio %u protocol %02x\n",
-				       tp->q->ops->id,
-				       tp->prio & 0xffff, ntohs(tp->protocol));
+					  tp->q->ops->id,
+					  tp->prio & 0xffff,
+					  ntohs(tp->protocol));
 			return TC_ACT_SHOT;
 		}
 		skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
@@ -1761,7 +1784,7 @@
 
 	err = register_pernet_subsys(&psched_net_ops);
 	if (err) {
-		printk(KERN_ERR "pktsched_init: "
+		pr_err("pktsched_init: "
 		       "cannot initialize per netns operations\n");
 		return err;
 	}
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 943d733..3f08158 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -319,7 +319,7 @@
 	 * creation), and one for the reference held when calling delete.
 	 */
 	if (flow->ref < 2) {
-		printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n", flow->ref);
+		pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
 		return -EINVAL;
 	}
 	if (flow->ref > 2)
@@ -384,12 +384,12 @@
 			}
 		}
 		flow = NULL;
-	done:
-		;		
+done:
+		;
 	}
-	if (!flow)
+	if (!flow) {
 		flow = &p->link;
-	else {
+	} else {
 		if (flow->vcc)
 			ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
 		/*@@@ looks good ... but it's not supposed to work :-) */
@@ -576,8 +576,7 @@
 
 	list_for_each_entry_safe(flow, tmp, &p->flows, list) {
 		if (flow->ref > 1)
-			printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow,
-			       flow->ref);
+			pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
 		atm_tc_put(sch, (unsigned long)flow);
 	}
 	tasklet_kill(&p->task);
@@ -616,9 +615,8 @@
 	}
 	if (flow->excess)
 		NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
-	else {
+	else
 		NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);
-	}
 
 	nla_nest_end(skb, nest);
 	return skb->len;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index c80d1c2..24d94c0 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -72,8 +72,7 @@
 struct cbq_sched_data;
 
 
-struct cbq_class
-{
+struct cbq_class {
 	struct Qdisc_class_common common;
 	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */
 
@@ -139,19 +138,18 @@
 	int			refcnt;
 	int			filters;
 
-	struct cbq_class 	*defaults[TC_PRIO_MAX+1];
+	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
 };
 
-struct cbq_sched_data
-{
+struct cbq_sched_data {
 	struct Qdisc_class_hash	clhash;			/* Hash table of all classes */
-	int			nclasses[TC_CBQ_MAXPRIO+1];
-	unsigned		quanta[TC_CBQ_MAXPRIO+1];
+	int			nclasses[TC_CBQ_MAXPRIO + 1];
+	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];
 
 	struct cbq_class	link;
 
-	unsigned		activemask;
-	struct cbq_class	*active[TC_CBQ_MAXPRIO+1];	/* List of all classes
+	unsigned int		activemask;
+	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
 								   with backlog */
 
 #ifdef CONFIG_NET_CLS_ACT
@@ -162,7 +160,7 @@
 	int			tx_len;
 	psched_time_t		now;		/* Cached timestamp */
 	psched_time_t		now_rt;		/* Cached real time */
-	unsigned		pmask;
+	unsigned int		pmask;
 
 	struct hrtimer		delay_timer;
 	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
@@ -175,9 +173,9 @@
 };
 
 
-#define L2T(cl,len)	qdisc_l2t((cl)->R_tab,len)
+#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)
 
-static __inline__ struct cbq_class *
+static inline struct cbq_class *
 cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
 {
 	struct Qdisc_class_common *clc;
@@ -193,25 +191,27 @@
 static struct cbq_class *
 cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
 {
-	struct cbq_class *cl, *new;
+	struct cbq_class *cl;
 
-	for (cl = this->tparent; cl; cl = cl->tparent)
-		if ((new = cl->defaults[TC_PRIO_BESTEFFORT]) != NULL && new != this)
+	for (cl = this->tparent; cl; cl = cl->tparent) {
+		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
+
+		if (new != NULL && new != this)
 			return new;
-
+	}
 	return NULL;
 }
 
 #endif
 
 /* Classify packet. The procedure is pretty complicated, but
-   it allows us to combine link sharing and priority scheduling
-   transparently.
-
-   Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
-   so that it resolves to split nodes. Then packets are classified
-   by logical priority, or a more specific classifier may be attached
-   to the split node.
+ * it allows us to combine link sharing and priority scheduling
+ * transparently.
+ *
+ * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
+ * so that it resolves to split nodes. Then packets are classified
+ * by logical priority, or a more specific classifier may be attached
+ * to the split node.
  */
 
 static struct cbq_class *
@@ -227,7 +227,7 @@
 	/*
 	 *  Step 1. If skb->priority points to one of our classes, use it.
 	 */
-	if (TC_H_MAJ(prio^sch->handle) == 0 &&
+	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
 	    (cl = cbq_class_lookup(q, prio)) != NULL)
 		return cl;
 
@@ -243,10 +243,11 @@
 		    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
 			goto fallback;
 
-		if ((cl = (void*)res.class) == NULL) {
+		cl = (void *)res.class;
+		if (!cl) {
 			if (TC_H_MAJ(res.classid))
 				cl = cbq_class_lookup(q, res.classid);
-			else if ((cl = defmap[res.classid&TC_PRIO_MAX]) == NULL)
+			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
 				cl = defmap[TC_PRIO_BESTEFFORT];
 
 			if (cl == NULL || cl->level >= head->level)
@@ -282,7 +283,7 @@
 	 * Step 4. No success...
 	 */
 	if (TC_H_MAJ(prio) == 0 &&
-	    !(cl = head->defaults[prio&TC_PRIO_MAX]) &&
+	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
 	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
 		return head;
 
@@ -290,12 +291,12 @@
 }
 
 /*
-   A packet has just been enqueued on the empty class.
-   cbq_activate_class adds it to the tail of active class list
-   of its priority band.
+ * A packet has just been enqueued on the empty class.
+ * cbq_activate_class adds it to the tail of active class list
+ * of its priority band.
  */
 
-static __inline__ void cbq_activate_class(struct cbq_class *cl)
+static inline void cbq_activate_class(struct cbq_class *cl)
 {
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
 	int prio = cl->cpriority;
@@ -314,9 +315,9 @@
 }
 
 /*
-   Unlink class from active chain.
-   Note that this same procedure is done directly in cbq_dequeue*
-   during round-robin procedure.
+ * Unlink class from active chain.
+ * Note that this same procedure is done directly in cbq_dequeue*
+ * during round-robin procedure.
  */
 
 static void cbq_deactivate_class(struct cbq_class *this)
@@ -350,7 +351,7 @@
 {
 	int toplevel = q->toplevel;
 
-	if (toplevel > cl->level && !(cl->q->flags&TCQ_F_THROTTLED)) {
+	if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
 		psched_time_t now;
 		psched_tdiff_t incr;
 
@@ -363,7 +364,7 @@
 				q->toplevel = cl->level;
 				return;
 			}
-		} while ((cl=cl->borrow) != NULL && toplevel > cl->level);
+		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
 	}
 }
 
@@ -390,7 +391,6 @@
 	ret = qdisc_enqueue(skb, cl->q);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
-		qdisc_bstats_update(sch, skb);
 		cbq_mark_toplevel(q, cl);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
@@ -418,11 +418,11 @@
 		delay += cl->offtime;
 
 		/*
-		   Class goes to sleep, so that it will have no
-		   chance to work avgidle. Let's forgive it 8)
-
-		   BTW cbq-2.0 has a crap in this
-		   place, apparently they forgot to shift it by cl->ewma_log.
+		 * Class goes to sleep, so that it will have no
+		 * chance to work avgidle. Let's forgive it 8)
+		 *
+		 * BTW cbq-2.0 has a crap in this
+		 * place, apparently they forgot to shift it by cl->ewma_log.
 		 */
 		if (cl->avgidle < 0)
 			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
@@ -439,8 +439,8 @@
 		q->wd_expires = delay;
 
 	/* Dirty work! We must schedule wakeups based on
-	   real available rate, rather than leaf rate,
-	   which may be tiny (even zero).
+	 * real available rate, rather than leaf rate,
+	 * which may be tiny (even zero).
 	 */
 	if (q->toplevel == TC_CBQ_MAXLEVEL) {
 		struct cbq_class *b;
@@ -460,7 +460,7 @@
 }
 
 /* TC_CBQ_OVL_RCLASSIC: penalize by offtime classes in hierarchy, when
-   they go overlimit
+ * they go overlimit
  */
 
 static void cbq_ovl_rclassic(struct cbq_class *cl)
@@ -595,7 +595,7 @@
 	struct Qdisc *sch = q->watchdog.qdisc;
 	psched_time_t now;
 	psched_tdiff_t delay = 0;
-	unsigned pmask;
+	unsigned int pmask;
 
 	now = psched_get_time();
 
@@ -624,7 +624,7 @@
 		hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
 	}
 
-	sch->flags &= ~TCQ_F_THROTTLED;
+	qdisc_unthrottled(sch);
 	__netif_schedule(qdisc_root(sch));
 	return HRTIMER_NORESTART;
 }
@@ -649,7 +649,6 @@
 		ret = qdisc_enqueue(skb, cl->q);
 		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
-			qdisc_bstats_update(sch, skb);
 			if (!cl->next_alive)
 				cbq_activate_class(cl);
 			return 0;
@@ -665,15 +664,15 @@
 #endif
 
 /*
-   It is mission critical procedure.
+ * It is a mission-critical procedure.
+ *
+ * We "regenerate" toplevel cutoff, if transmitting class
+ * has backlog and it is not regulated. It is not part of
+ * original CBQ description, but looks more reasonable.
+ * Probably, it is wrong. This question needs further investigation.
+ */
 
-   We "regenerate" toplevel cutoff, if transmitting class
-   has backlog and it is not regulated. It is not part of
-   original CBQ description, but looks more reasonable.
-   Probably, it is wrong. This question needs further investigation.
-*/
-
-static __inline__ void
+static inline void
 cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
 		    struct cbq_class *borrowed)
 {
@@ -684,7 +683,7 @@
 					q->toplevel = borrowed->level;
 					return;
 				}
-			} while ((borrowed=borrowed->borrow) != NULL);
+			} while ((borrowed = borrowed->borrow) != NULL);
 		}
 #if 0
 	/* It is not necessary now. Uncommenting it
@@ -712,10 +711,10 @@
 		cl->bstats.bytes += len;
 
 		/*
-		   (now - last) is total time between packet right edges.
-		   (last_pktlen/rate) is "virtual" busy time, so that
-
-			 idle = (now - last) - last_pktlen/rate
+		 * (now - last) is total time between packet right edges.
+		 * (last_pktlen/rate) is "virtual" busy time, so that
+		 *
+		 *	idle = (now - last) - last_pktlen/rate
 		 */
 
 		idle = q->now - cl->last;
@@ -725,9 +724,9 @@
 			idle -= L2T(cl, len);
 
 		/* true_avgidle := (1-W)*true_avgidle + W*idle,
-		   where W=2^{-ewma_log}. But cl->avgidle is scaled:
-		   cl->avgidle == true_avgidle/W,
-		   hence:
+		 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
+		 * cl->avgidle == true_avgidle/W,
+		 * hence:
 		 */
 			avgidle += idle - (avgidle>>cl->ewma_log);
 		}
@@ -741,22 +740,22 @@
 			cl->avgidle = avgidle;
 
 			/* Calculate expected time, when this class
-			   will be allowed to send.
-			   It will occur, when:
-			   (1-W)*true_avgidle + W*delay = 0, i.e.
-			   idle = (1/W - 1)*(-true_avgidle)
-			   or
-			   idle = (1 - W)*(-cl->avgidle);
+			 * will be allowed to send.
+			 * It will occur, when:
+			 * (1-W)*true_avgidle + W*delay = 0, i.e.
+			 * idle = (1/W - 1)*(-true_avgidle)
+			 * or
+			 * idle = (1 - W)*(-cl->avgidle);
 			 */
 			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
 
 			/*
-			   That is not all.
-			   To maintain the rate allocated to the class,
-			   we add to undertime virtual clock,
-			   necessary to complete transmitted packet.
-			   (len/phys_bandwidth has been already passed
-			   to the moment of cbq_update)
+			 * That is not all.
+			 * To maintain the rate allocated to the class,
+			 * we add to undertime virtual clock,
+			 * necessary to complete transmitted packet.
+			 * (len/phys_bandwidth has been already passed
+			 * to the moment of cbq_update)
 			 */
 
 			idle -= L2T(&q->link, len);
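
The avgidle bookkeeping spelled out in the comments above, restated as equations (W = 2^{-ewma_log}, exactly as the comments define it; the floor terms are the ">> ewma_log" shifts in the code):

\[
\begin{aligned}
\text{true\_avgidle} &\leftarrow (1 - W)\cdot\text{true\_avgidle} + W\cdot\text{idle},
  \qquad W = 2^{-\text{ewma\_log}} \\
\text{avgidle} &= \text{true\_avgidle}/W
  \quad\Longrightarrow\quad
  \text{avgidle} \leftarrow \text{avgidle} + \text{idle} - \lfloor \text{avgidle}\cdot W \rfloor \\
\text{idle}_{\text{wait}} &= \left(\tfrac{1}{W} - 1\right)\cdot(-\text{true\_avgidle})
  = (1 - W)\cdot(-\text{avgidle})
  = (-\text{avgidle}) - \lfloor(-\text{avgidle})\cdot W\rfloor
\end{aligned}
\]

The shift-based form on the last line is the same one used for the overlimit delay earlier in this file's diff and for the wait time computed here after avgidle has gone negative.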
@@ -778,7 +777,7 @@
 	cbq_update_toplevel(q, this, q->tx_borrowed);
 }
 
-static __inline__ struct cbq_class *
+static inline struct cbq_class *
 cbq_under_limit(struct cbq_class *cl)
 {
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
@@ -794,16 +793,17 @@
 
 	do {
 		/* It is very suspicious place. Now overlimit
-		   action is generated for not bounded classes
-		   only if link is completely congested.
-		   Though it is in agree with ancestor-only paradigm,
-		   it looks very stupid. Particularly,
-		   it means that this chunk of code will either
-		   never be called or result in strong amplification
-		   of burstiness. Dangerous, silly, and, however,
-		   no another solution exists.
+		 * action is generated for not bounded classes
+		 * only if link is completely congested.
+		 * Though it is in agree with ancestor-only paradigm,
+		 * it looks very stupid. Particularly,
+		 * it means that this chunk of code will either
+		 * never be called or result in strong amplification
+		 * of burstiness. Dangerous, silly, and, however,
+		 * no another solution exists.
 		 */
-		if ((cl = cl->borrow) == NULL) {
+		cl = cl->borrow;
+		if (!cl) {
 			this_cl->qstats.overlimits++;
 			this_cl->overlimit(this_cl);
 			return NULL;
@@ -816,7 +816,7 @@
 	return cl;
 }
 
-static __inline__ struct sk_buff *
+static inline struct sk_buff *
 cbq_dequeue_prio(struct Qdisc *sch, int prio)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
@@ -840,7 +840,7 @@
 
 			if (cl->deficit <= 0) {
 				/* Class exhausted its allotment per
-				   this round. Switch to the next one.
+				 * this round. Switch to the next one.
 				 */
 				deficit = 1;
 				cl->deficit += cl->quantum;
@@ -850,8 +850,8 @@
 			skb = cl->q->dequeue(cl->q);
 
 			/* Class did not give us any skb :-(
-			   It could occur even if cl->q->q.qlen != 0
-			   f.e. if cl->q == "tbf"
+			 * It could occur even if cl->q->q.qlen != 0
+			 * f.e. if cl->q == "tbf"
 			 */
 			if (skb == NULL)
 				goto skip_class;
@@ -880,7 +880,7 @@
 skip_class:
 			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
 				/* Class is empty or penalized.
-				   Unlink it from active chain.
+				 * Unlink it from active chain.
 				 */
 				cl_prev->next_alive = cl->next_alive;
 				cl->next_alive = NULL;
@@ -919,14 +919,14 @@
 	return NULL;
 }
 
-static __inline__ struct sk_buff *
+static inline struct sk_buff *
 cbq_dequeue_1(struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
-	unsigned activemask;
+	unsigned int activemask;
 
-	activemask = q->activemask&0xFF;
+	activemask = q->activemask & 0xFF;
 	while (activemask) {
 		int prio = ffz(~activemask);
 		activemask &= ~(1<<prio);
@@ -951,11 +951,11 @@
 	if (q->tx_class) {
 		psched_tdiff_t incr2;
 		/* Time integrator. We calculate EOS time
-		   by adding expected packet transmission time.
-		   If real time is greater, we warp artificial clock,
-		   so that:
-
-		   cbq_time = max(real_time, work);
+		 * by adding expected packet transmission time.
+		 * If real time is greater, we warp artificial clock,
+		 * so that:
+		 *
+		 * cbq_time = max(real_time, work);
 		 */
 		incr2 = L2T(&q->link, q->tx_len);
 		q->now += incr2;
@@ -971,28 +971,29 @@
 
 		skb = cbq_dequeue_1(sch);
 		if (skb) {
+			qdisc_bstats_update(sch, skb);
 			sch->q.qlen--;
-			sch->flags &= ~TCQ_F_THROTTLED;
+			qdisc_unthrottled(sch);
 			return skb;
 		}
 
 		/* All the classes are overlimit.
-
-		   It is possible, if:
-
-		   1. Scheduler is empty.
-		   2. Toplevel cutoff inhibited borrowing.
-		   3. Root class is overlimit.
-
-		   Reset 2d and 3d conditions and retry.
-
-		   Note, that NS and cbq-2.0 are buggy, peeking
-		   an arbitrary class is appropriate for ancestor-only
-		   sharing, but not for toplevel algorithm.
-
-		   Our version is better, but slower, because it requires
-		   two passes, but it is unavoidable with top-level sharing.
-		*/
+		 *
+		 * It is possible, if:
+		 *
+		 * 1. Scheduler is empty.
+		 * 2. Toplevel cutoff inhibited borrowing.
+		 * 3. Root class is overlimit.
+		 *
+		 * Reset 2d and 3d conditions and retry.
+		 *
+		 * Note, that NS and cbq-2.0 are buggy, peeking
+		 * an arbitrary class is appropriate for ancestor-only
+		 * sharing, but not for toplevel algorithm.
+		 *
+		 * Our version is better, but slower, because it requires
+		 * two passes, but it is unavoidable with top-level sharing.
+		 */
 
 		if (q->toplevel == TC_CBQ_MAXLEVEL &&
 		    q->link.undertime == PSCHED_PASTPERFECT)
@@ -1003,7 +1004,8 @@
 	}
 
 	/* No packets in scheduler or nobody wants to give them to us :-(
-	   Sigh... start watchdog timer in the last case. */
+	 * Sigh... start watchdog timer in the last case.
+	 */
 
 	if (sch->q.qlen) {
 		sch->qstats.overlimits++;
@@ -1025,13 +1027,14 @@
 		int level = 0;
 		struct cbq_class *cl;
 
-		if ((cl = this->children) != NULL) {
+		cl = this->children;
+		if (cl) {
 			do {
 				if (cl->level > level)
 					level = cl->level;
 			} while ((cl = cl->sibling) != this->children);
 		}
-		this->level = level+1;
+		this->level = level + 1;
 	} while ((this = this->tparent) != NULL);
 }
 
@@ -1047,14 +1050,15 @@
 	for (h = 0; h < q->clhash.hashsize; h++) {
 		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
 			/* BUGGGG... Beware! This expression suffer of
-			   arithmetic overflows!
+			 * arithmetic overflows!
 			 */
 			if (cl->priority == prio) {
 				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
 					q->quanta[prio];
 			}
 			if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
-				printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
+				pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n",
+					   cl->common.classid, cl->quantum);
 				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
 			}
 		}
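
As a worked example of the quantum formula in this hunk (assuming, as in the rest of this scheduler, that q->quanta[prio] accumulates the weights of the nclasses[prio] classes sharing a priority): three classes with weights 1, 2 and 1 and an allot of 1514 bytes give

\[
\text{quantum}_i = \frac{\text{weight}_i \cdot \text{allot}_i \cdot \text{nclasses}[\text{prio}]}{\text{quanta}[\text{prio}]},
\qquad \frac{2 \cdot 1514 \cdot 3}{1 + 2 + 1} = 2271 \text{ bytes}
\]

for the weight-2 class and 1135 bytes for each weight-1 class; the warning branch only repairs results that fall outside (0, 32*MTU].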
@@ -1065,18 +1069,18 @@
 {
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
 	struct cbq_class *split = cl->split;
-	unsigned h;
+	unsigned int h;
 	int i;
 
 	if (split == NULL)
 		return;
 
-	for (i=0; i<=TC_PRIO_MAX; i++) {
-		if (split->defaults[i] == cl && !(cl->defmap&(1<<i)))
+	for (i = 0; i <= TC_PRIO_MAX; i++) {
+		if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
 			split->defaults[i] = NULL;
 	}
 
-	for (i=0; i<=TC_PRIO_MAX; i++) {
+	for (i = 0; i <= TC_PRIO_MAX; i++) {
 		int level = split->level;
 
 		if (split->defaults[i])
@@ -1089,7 +1093,7 @@
 			hlist_for_each_entry(c, n, &q->clhash.hash[h],
 					     common.hnode) {
 				if (c->split == split && c->level < level &&
-				    c->defmap&(1<<i)) {
+				    c->defmap & (1<<i)) {
 					split->defaults[i] = c;
 					level = c->level;
 				}
@@ -1103,7 +1107,8 @@
 	struct cbq_class *split = NULL;
 
 	if (splitid == 0) {
-		if ((split = cl->split) == NULL)
+		split = cl->split;
+		if (!split)
 			return;
 		splitid = split->common.classid;
 	}
@@ -1121,9 +1126,9 @@
 		cl->defmap = 0;
 		cbq_sync_defmap(cl);
 		cl->split = split;
-		cl->defmap = def&mask;
+		cl->defmap = def & mask;
 	} else
-		cl->defmap = (cl->defmap&~mask)|(def&mask);
+		cl->defmap = (cl->defmap & ~mask) | (def & mask);
 
 	cbq_sync_defmap(cl);
 }
@@ -1136,7 +1141,7 @@
 	qdisc_class_hash_remove(&q->clhash, &this->common);
 
 	if (this->tparent) {
-		clp=&this->sibling;
+		clp = &this->sibling;
 		cl = *clp;
 		do {
 			if (cl == this) {
@@ -1175,7 +1180,7 @@
 	}
 }
 
-static unsigned int cbq_drop(struct Qdisc* sch)
+static unsigned int cbq_drop(struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl, *cl_head;
@@ -1183,7 +1188,8 @@
 	unsigned int len;
 
 	for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
-		if ((cl_head = q->active[prio]) == NULL)
+		cl_head = q->active[prio];
+		if (!cl_head)
 			continue;
 
 		cl = cl_head;
@@ -1200,13 +1206,13 @@
 }
 
 static void
-cbq_reset(struct Qdisc* sch)
+cbq_reset(struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl;
 	struct hlist_node *n;
 	int prio;
-	unsigned h;
+	unsigned int h;
 
 	q->activemask = 0;
 	q->pmask = 0;
@@ -1238,21 +1244,21 @@
 
 static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
 {
-	if (lss->change&TCF_CBQ_LSS_FLAGS) {
-		cl->share = (lss->flags&TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
-		cl->borrow = (lss->flags&TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
+	if (lss->change & TCF_CBQ_LSS_FLAGS) {
+		cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
+		cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
 	}
-	if (lss->change&TCF_CBQ_LSS_EWMA)
+	if (lss->change & TCF_CBQ_LSS_EWMA)
 		cl->ewma_log = lss->ewma_log;
-	if (lss->change&TCF_CBQ_LSS_AVPKT)
+	if (lss->change & TCF_CBQ_LSS_AVPKT)
 		cl->avpkt = lss->avpkt;
-	if (lss->change&TCF_CBQ_LSS_MINIDLE)
+	if (lss->change & TCF_CBQ_LSS_MINIDLE)
 		cl->minidle = -(long)lss->minidle;
-	if (lss->change&TCF_CBQ_LSS_MAXIDLE) {
+	if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
 		cl->maxidle = lss->maxidle;
 		cl->avgidle = lss->maxidle;
 	}
-	if (lss->change&TCF_CBQ_LSS_OFFTIME)
+	if (lss->change & TCF_CBQ_LSS_OFFTIME)
 		cl->offtime = lss->offtime;
 	return 0;
 }
@@ -1280,10 +1286,10 @@
 	if (wrr->weight)
 		cl->weight = wrr->weight;
 	if (wrr->priority) {
-		cl->priority = wrr->priority-1;
+		cl->priority = wrr->priority - 1;
 		cl->cpriority = cl->priority;
 		if (cl->priority >= cl->priority2)
-			cl->priority2 = TC_CBQ_MAXPRIO-1;
+			cl->priority2 = TC_CBQ_MAXPRIO - 1;
 	}
 
 	cbq_addprio(q, cl);
@@ -1300,10 +1306,10 @@
 		cl->overlimit = cbq_ovl_delay;
 		break;
 	case TC_CBQ_OVL_LOWPRIO:
-		if (ovl->priority2-1 >= TC_CBQ_MAXPRIO ||
-		    ovl->priority2-1 <= cl->priority)
+		if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
+		    ovl->priority2 - 1 <= cl->priority)
 			return -EINVAL;
-		cl->priority2 = ovl->priority2-1;
+		cl->priority2 = ovl->priority2 - 1;
 		cl->overlimit = cbq_ovl_lowprio;
 		break;
 	case TC_CBQ_OVL_DROP:
@@ -1382,9 +1388,9 @@
 	if (!q->link.q)
 		q->link.q = &noop_qdisc;
 
-	q->link.priority = TC_CBQ_MAXPRIO-1;
-	q->link.priority2 = TC_CBQ_MAXPRIO-1;
-	q->link.cpriority = TC_CBQ_MAXPRIO-1;
+	q->link.priority = TC_CBQ_MAXPRIO - 1;
+	q->link.priority2 = TC_CBQ_MAXPRIO - 1;
+	q->link.cpriority = TC_CBQ_MAXPRIO - 1;
 	q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
 	q->link.overlimit = cbq_ovl_classic;
 	q->link.allot = psched_mtu(qdisc_dev(sch));
@@ -1415,7 +1421,7 @@
 	return err;
 }
 
-static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 
@@ -1427,7 +1433,7 @@
 	return -1;
 }
 
-static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_lssopt opt;
@@ -1452,15 +1458,15 @@
 	return -1;
 }
 
-static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_wrropt opt;
 
 	opt.flags = 0;
 	opt.allot = cl->allot;
-	opt.priority = cl->priority+1;
-	opt.cpriority = cl->cpriority+1;
+	opt.priority = cl->priority + 1;
+	opt.cpriority = cl->cpriority + 1;
 	opt.weight = cl->weight;
 	NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt);
 	return skb->len;
@@ -1470,13 +1476,13 @@
 	return -1;
 }
 
-static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_ovl opt;
 
 	opt.strategy = cl->ovl_strategy;
-	opt.priority2 = cl->priority2+1;
+	opt.priority2 = cl->priority2 + 1;
 	opt.pad = 0;
 	opt.penalty = cl->penalty;
 	NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt);
@@ -1487,7 +1493,7 @@
 	return -1;
 }
 
-static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_fopt opt;
@@ -1506,7 +1512,7 @@
 }
 
 #ifdef CONFIG_NET_CLS_ACT
-static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
+static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cbq_police opt;
@@ -1570,7 +1576,7 @@
 cbq_dump_class(struct Qdisc *sch, unsigned long arg,
 	       struct sk_buff *skb, struct tcmsg *tcm)
 {
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 	struct nlattr *nest;
 
 	if (cl->tparent)
@@ -1598,7 +1604,7 @@
 	struct gnet_dump *d)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	cl->qstats.qlen = cl->q->q.qlen;
 	cl->xstats.avgidle = cl->avgidle;
@@ -1618,7 +1624,7 @@
 static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 		     struct Qdisc **old)
 {
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	if (new == NULL) {
 		new = qdisc_create_dflt(sch->dev_queue,
@@ -1641,10 +1647,9 @@
 	return 0;
 }
 
-static struct Qdisc *
-cbq_leaf(struct Qdisc *sch, unsigned long arg)
+static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
 {
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	return cl->q;
 }
@@ -1683,13 +1688,12 @@
 		kfree(cl);
 }
 
-static void
-cbq_destroy(struct Qdisc* sch)
+static void cbq_destroy(struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct hlist_node *n, *next;
 	struct cbq_class *cl;
-	unsigned h;
+	unsigned int h;
 
 #ifdef CONFIG_NET_CLS_ACT
 	q->rx_class = NULL;
@@ -1713,7 +1717,7 @@
 
 static void cbq_put(struct Qdisc *sch, unsigned long arg)
 {
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	if (--cl->refcnt == 0) {
 #ifdef CONFIG_NET_CLS_ACT
@@ -1736,7 +1740,7 @@
 {
 	int err;
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	struct cbq_class *cl = (struct cbq_class*)*arg;
+	struct cbq_class *cl = (struct cbq_class *)*arg;
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_CBQ_MAX + 1];
 	struct cbq_class *parent;
@@ -1828,13 +1832,14 @@
 
 	if (classid) {
 		err = -EINVAL;
-		if (TC_H_MAJ(classid^sch->handle) || cbq_class_lookup(q, classid))
+		if (TC_H_MAJ(classid ^ sch->handle) ||
+		    cbq_class_lookup(q, classid))
 			goto failure;
 	} else {
 		int i;
-		classid = TC_H_MAKE(sch->handle,0x8000);
+		classid = TC_H_MAKE(sch->handle, 0x8000);
 
-		for (i=0; i<0x8000; i++) {
+		for (i = 0; i < 0x8000; i++) {
 			if (++q->hgenerator >= 0x8000)
 				q->hgenerator = 1;
 			if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
@@ -1891,11 +1896,11 @@
 	cl->minidle = -0x7FFFFFFF;
 	cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
 	cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
-	if (cl->ewma_log==0)
+	if (cl->ewma_log == 0)
 		cl->ewma_log = q->link.ewma_log;
-	if (cl->maxidle==0)
+	if (cl->maxidle == 0)
 		cl->maxidle = q->link.maxidle;
-	if (cl->avpkt==0)
+	if (cl->avpkt == 0)
 		cl->avpkt = q->link.avpkt;
 	cl->overlimit = cbq_ovl_classic;
 	if (tb[TCA_CBQ_OVL_STRATEGY])
@@ -1921,7 +1926,7 @@
 static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 	unsigned int qlen;
 
 	if (cl->filters || cl->children || cl == &q->link)
@@ -1979,7 +1984,7 @@
 				     u32 classid)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	struct cbq_class *p = (struct cbq_class*)parent;
+	struct cbq_class *p = (struct cbq_class *)parent;
 	struct cbq_class *cl = cbq_class_lookup(q, classid);
 
 	if (cl) {
@@ -1993,7 +1998,7 @@
 
 static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
 {
-	struct cbq_class *cl = (struct cbq_class*)arg;
+	struct cbq_class *cl = (struct cbq_class *)arg;
 
 	cl->filters--;
 }
@@ -2003,7 +2008,7 @@
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl;
 	struct hlist_node *n;
-	unsigned h;
+	unsigned int h;
 
 	if (arg->stop)
 		return;
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
new file mode 100644
index 0000000..ee1e209
--- /dev/null
+++ b/net/sched/sch_choke.c
@@ -0,0 +1,677 @@
+/*
+ * net/sched/sch_choke.c	CHOKE scheduler
+ *
+ * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
+ * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/reciprocal_div.h>
+#include <linux/vmalloc.h>
+#include <net/pkt_sched.h>
+#include <net/inet_ecn.h>
+#include <net/red.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+
+/*
+   CHOKe stateless AQM for fair bandwidth allocation
+   =================================================
+
+   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
+   unresponsive flows) is a variant of RED that penalizes misbehaving flows but
+   maintains no flow state. The difference from RED is an additional step
+   during the enqueuing process. If average queue size is over the
+   low threshold (qmin), a packet is chosen at random from the queue.
+   If both the new and chosen packet are from the same flow, both
+   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
+   needs to access packets in queue randomly. It has a minimal class
+   interface to allow overriding the builtin flow classifier with
+   filters.
+
+   Source:
+   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
+   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
+   IEEE INFOCOM, 2000.
+
+   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
+   Characteristics", IEEE/ACM Transactions on Networking, 2004
+
+ */
+
+/* Upper bound on size of sk_buff table (packets) */
+#define CHOKE_MAX_QUEUE	(128*1024 - 1)
+
+struct choke_sched_data {
+/* Parameters */
+	u32		 limit;
+	unsigned char	 flags;
+
+	struct red_parms parms;
+
+/* Variables */
+	struct tcf_proto *filter_list;
+	struct {
+		u32	prob_drop;	/* Early probability drops */
+		u32	prob_mark;	/* Early probability marks */
+		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
+		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
+		u32	pdrop;          /* Drops due to queue limits */
+		u32	other;          /* Drops due to drop() calls */
+		u32	matched;	/* Drops to flow match */
+	} stats;
+
+	unsigned int	 head;
+	unsigned int	 tail;
+
+	unsigned int	 tab_mask; /* size - 1 */
+
+	struct sk_buff **tab;
+};
+
+/* deliver a random number between 0 and N - 1 */
+static u32 random_N(unsigned int N)
+{
+	return reciprocal_divide(random32(), N);
+}
+
+/* number of elements in queue including holes */
+static unsigned int choke_len(const struct choke_sched_data *q)
+{
+	return (q->tail - q->head) & q->tab_mask;
+}
+
+/* Is ECN parameter configured */
+static int use_ecn(const struct choke_sched_data *q)
+{
+	return q->flags & TC_RED_ECN;
+}
+
+/* Should packets over max just be dropped (versus marked) */
+static int use_harddrop(const struct choke_sched_data *q)
+{
+	return q->flags & TC_RED_HARDDROP;
+}
+
+/* Move head pointer forward to skip over holes */
+static void choke_zap_head_holes(struct choke_sched_data *q)
+{
+	do {
+		q->head = (q->head + 1) & q->tab_mask;
+		if (q->head == q->tail)
+			break;
+	} while (q->tab[q->head] == NULL);
+}
+
+/* Move tail pointer backwards to reuse holes */
+static void choke_zap_tail_holes(struct choke_sched_data *q)
+{
+	do {
+		q->tail = (q->tail - 1) & q->tab_mask;
+		if (q->head == q->tail)
+			break;
+	} while (q->tab[q->tail] == NULL);
+}
+
+/* Drop packet from queue array by creating a "hole" */
+static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
+{
+	struct choke_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *skb = q->tab[idx];
+
+	q->tab[idx] = NULL;
+
+	if (idx == q->head)
+		choke_zap_head_holes(q);
+	if (idx == q->tail)
+		choke_zap_tail_holes(q);
+
+	sch->qstats.backlog -= qdisc_pkt_len(skb);
+	qdisc_drop(skb, sch);
+	qdisc_tree_decrease_qlen(sch, 1);
+	--sch->q.qlen;
+}
+
+/*
+ * Compare flow of two packets
+ *  Returns true only if source and destination address and port match.
+ *          false for special cases
+ */
+static bool choke_match_flow(struct sk_buff *skb1,
+			     struct sk_buff *skb2)
+{
+	int off1, off2, poff;
+	const u32 *ports1, *ports2;
+	u8 ip_proto;
+	__u32 hash1;
+
+	if (skb1->protocol != skb2->protocol)
+		return false;
+
+	/* Use hash value as quick check
+	 * Assumes that __skb_get_rxhash makes IP header and ports linear
+	 */
+	hash1 = skb_get_rxhash(skb1);
+	if (!hash1 || hash1 != skb_get_rxhash(skb2))
+		return false;
+
+	/* Probably match, but be sure to avoid hash collisions */
+	off1 = skb_network_offset(skb1);
+	off2 = skb_network_offset(skb2);
+
+	switch (skb1->protocol) {
+	case __constant_htons(ETH_P_IP): {
+		const struct iphdr *ip1, *ip2;
+
+		ip1 = (const struct iphdr *) (skb1->data + off1);
+		ip2 = (const struct iphdr *) (skb2->data + off2);
+
+		ip_proto = ip1->protocol;
+		if (ip_proto != ip2->protocol ||
+		    ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
+			return false;
+
+		if ((ip1->frag_off | ip2->frag_off) & htons(IP_MF | IP_OFFSET))
+			ip_proto = 0;
+		off1 += ip1->ihl * 4;
+		off2 += ip2->ihl * 4;
+		break;
+	}
+
+	case __constant_htons(ETH_P_IPV6): {
+		const struct ipv6hdr *ip1, *ip2;
+
+		ip1 = (const struct ipv6hdr *) (skb1->data + off1);
+		ip2 = (const struct ipv6hdr *) (skb2->data + off2);
+
+		ip_proto = ip1->nexthdr;
+		if (ip_proto != ip2->nexthdr ||
+		    ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) ||
+		    ipv6_addr_cmp(&ip1->daddr, &ip2->daddr))
+			return false;
+		off1 += 40;
+		off2 += 40;
+		break;
+	}
+
+	default: /* Maybe compare MAC header here? */
+		return false;
+	}
+
+	poff = proto_ports_offset(ip_proto);
+	if (poff < 0)
+		return true;
+
+	off1 += poff;
+	off2 += poff;
+
+	ports1 = (__force u32 *)(skb1->data + off1);
+	ports2 = (__force u32 *)(skb2->data + off2);
+	return *ports1 == *ports2;
+}
+
+static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
+{
+	*(unsigned int *)(qdisc_skb_cb(skb)->data) = classid;
+}
+
+static u16 choke_get_classid(const struct sk_buff *skb)
+{
+	return *(unsigned int *)(qdisc_skb_cb(skb)->data);
+}
+
+/*
+ * Classify flow using either:
+ *  1. pre-existing classification result in skb
+ *  2. fast internal classification
+ *  3. use TC filter based classification
+ */
+static bool choke_classify(struct sk_buff *skb,
+			   struct Qdisc *sch, int *qerr)
+{
+	struct choke_sched_data *q = qdisc_priv(sch);
+	struct tcf_result res;
+	int result;
+
+	result = tc_classify(skb, q->filter_list, &res);
+	if (result >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+		switch (result) {
+		case TC_ACT_STOLEN:
+		case TC_ACT_QUEUED:
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+		case TC_ACT_SHOT:
+			return false;
+		}
+#endif
+		choke_set_classid(skb, TC_H_MIN(res.classid));
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * Select a packet at random from queue
+ * HACK: since queue can have holes from previous deletion; retry several
+ *   times to find a random skb but then just give up and return the head
+ * Will return NULL if queue is empty (q->head == q->tail)
+ */
+static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
+					 unsigned int *pidx)
+{
+	struct sk_buff *skb;
+	int retries = 3;
+
+	do {
+		*pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
+		skb = q->tab[*pidx];
+		if (skb)
+			return skb;
+	} while (--retries > 0);
+
+	return q->tab[*pidx = q->head];
+}
+
+/*
+ * Compare new packet with random packet in queue
+ * returns true if matched and sets *pidx
+ */
+static bool choke_match_random(const struct choke_sched_data *q,
+			       struct sk_buff *nskb,
+			       unsigned int *pidx)
+{
+	struct sk_buff *oskb;
+
+	if (q->head == q->tail)
+		return false;
+
+	oskb = choke_peek_random(q, pidx);
+	if (q->filter_list)
+		return choke_get_classid(nskb) == choke_get_classid(oskb);
+
+	return choke_match_flow(oskb, nskb);
+}
+
+static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct choke_sched_data *q = qdisc_priv(sch);
+	struct red_parms *p = &q->parms;
+	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
+
+	if (q->filter_list) {
+		/* If using external classifiers, get result and record it. */
+		if (!choke_classify(skb, sch, &ret))
+			goto other_drop;	/* Packet was eaten by filter */
+	}
+
+	/* Compute average queue usage (see RED) */
+	p->qavg = red_calc_qavg(p, sch->q.qlen);
+	if (red_is_idling(p))
+		red_end_of_idle_period(p);
+
+	/* Is queue small? */
+	if (p->qavg <= p->qth_min)
+		p->qcount = -1;
+	else {
+		unsigned int idx;
+
+		/* Draw a packet at random from queue and compare flow */
+		if (choke_match_random(q, skb, &idx)) {
+			q->stats.matched++;
+			choke_drop_by_idx(sch, idx);
+			goto congestion_drop;
+		}
+
+		/* Queue is large, always mark/drop */
+		if (p->qavg > p->qth_max) {
+			p->qcount = -1;
+
+			sch->qstats.overlimits++;
+			if (use_harddrop(q) || !use_ecn(q) ||
+			    !INET_ECN_set_ce(skb)) {
+				q->stats.forced_drop++;
+				goto congestion_drop;
+			}
+
+			q->stats.forced_mark++;
+		} else if (++p->qcount) {
+			if (red_mark_probability(p, p->qavg)) {
+				p->qcount = 0;
+				p->qR = red_random(p);
+
+				sch->qstats.overlimits++;
+				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
+					q->stats.prob_drop++;
+					goto congestion_drop;
+				}
+
+				q->stats.prob_mark++;
+			}
+		} else
+			p->qR = red_random(p);
+	}
+
+	/* Admit new packet */
+	if (sch->q.qlen < q->limit) {
+		q->tab[q->tail] = skb;
+		q->tail = (q->tail + 1) & q->tab_mask;
+		++sch->q.qlen;
+		sch->qstats.backlog += qdisc_pkt_len(skb);
+		return NET_XMIT_SUCCESS;
+	}
+
+	q->stats.pdrop++;
+	sch->qstats.drops++;
+	kfree_skb(skb);
+	return NET_XMIT_DROP;
+
+ congestion_drop:
+	qdisc_drop(skb, sch);
+	return NET_XMIT_CN;
+
+ other_drop:
+	if (ret & __NET_XMIT_BYPASS)
+		sch->qstats.drops++;
+	kfree_skb(skb);
+	return ret;
+}
+
+static struct sk_buff *choke_dequeue(struct Qdisc *sch)
+{
+	struct choke_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *skb;
+
+	if (q->head == q->tail) {
+		if (!red_is_idling(&q->parms))
+			red_start_of_idle_period(&q->parms);
+		return NULL;
+	}
+
+	skb = q->tab[q->head];
+	q->tab[q->head] = NULL;
+	choke_zap_head_holes(q);
+	--sch->q.qlen;
+	sch->qstats.backlog -= qdisc_pkt_len(skb);
+	qdisc_bstats_update(sch, skb);
+
+	return skb;
+}
+
+static unsigned int choke_drop(struct Qdisc *sch)
+{
+	struct choke_sched_data *q = qdisc_priv(sch);
+	unsigned int len;
+
+	len = qdisc_queue_drop(sch);
+	if (len > 0)
+		q->stats.other++;
+	else {
+		if (!red_is_idling(&q->parms))
+			red_start_of_idle_period(&q->parms);
+	}
+
+	return len;
+}
+
+static void choke_reset(struct Qdisc *sch)
+{
+	struct choke_sched_data *q = qdisc_priv(sch);
+
+	red_restart(&q->parms);
+}
+
+static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
+	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
+	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
+};
+
+
+static void choke_free(void *addr)
+{
+	if (addr) {
+		if (is_vmalloc_addr(addr))
+			vfree(addr);
+		else
+			kfree(addr);
+	}
+}
+
+static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct choke_sched_data *q = qdisc_priv(sch);
+	struct nlattr *tb[TCA_CHOKE_MAX + 1];
+	const struct tc_red_qopt *ctl;
+	int err;
+	struct sk_buff **old = NULL;
+	unsigned int mask;
+
+	if (opt == NULL)
+		return -EINVAL;
+
+	err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
+	if (err < 0)
+		return err;
+
+	if (tb[TCA_CHOKE_PARMS] == NULL ||
+	    tb[TCA_CHOKE_STAB] == NULL)
+		return -EINVAL;
+
+	ctl = nla_data(tb[TCA_CHOKE_PARMS]);
+
+	if (ctl->limit > CHOKE_MAX_QUEUE)
+		return -EINVAL;
+
+	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
+	if (mask != q->tab_mask) {
+		struct sk_buff **ntab;
+
+		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
+		if (!ntab)
+			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
+		if (!ntab)
+			return -ENOMEM;
+
+		sch_tree_lock(sch);
+		old = q->tab;
+		if (old) {
+			unsigned int oqlen = sch->q.qlen, tail = 0;
+
+			while (q->head != q->tail) {
+				struct sk_buff *skb = q->tab[q->head];
+
+				q->head = (q->head + 1) & q->tab_mask;
+				if (!skb)
+					continue;
+				if (tail < mask) {
+					ntab[tail++] = skb;
+					continue;
+				}
+				sch->qstats.backlog -= qdisc_pkt_len(skb);
+				--sch->q.qlen;
+				qdisc_drop(skb, sch);
+			}
+			qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
+			q->head = 0;
+			q->tail = tail;
+		}
+
+		q->tab_mask = mask;
+		q->tab = ntab;
+	} else
+		sch_tree_lock(sch);
+
+	q->flags = ctl->flags;
+	q->limit = ctl->limit;
+
+	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
+		      ctl->Plog, ctl->Scell_log,
+		      nla_data(tb[TCA_CHOKE_STAB]));
+
+	if (q->head == q->tail)
+		red_end_of_idle_period(&q->parms);
+
+	sch_tree_unlock(sch);
+	choke_free(old);
+	return 0;
+}
+
+static int choke_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	return choke_change(sch, opt);
+}
+
+static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct choke_sched_data *q = qdisc_priv(sch);
+	struct nlattr *opts = NULL;
+	struct tc_red_qopt opt = {
+		.limit		= q->limit,
+		.flags		= q->flags,
+		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
+		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
+		.Wlog		= q->parms.Wlog,
+		.Plog		= q->parms.Plog,
+		.Scell_log	= q->parms.Scell_log,
+	};
+
+	opts = nla_nest_start(skb, TCA_OPTIONS);
+	if (opts == NULL)
+		goto nla_put_failure;
+
+	NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
+	return nla_nest_end(skb, opts);
+
+nla_put_failure:
+	nla_nest_cancel(skb, opts);
+	return -EMSGSIZE;
+}
+
+static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+	struct choke_sched_data *q = qdisc_priv(sch);
+	struct tc_choke_xstats st = {
+		.early	= q->stats.prob_drop + q->stats.forced_drop,
+		.marked	= q->stats.prob_mark + q->stats.forced_mark,
+		.pdrop	= q->stats.pdrop,
+		.other	= q->stats.other,
+		.matched = q->stats.matched,
+	};
+
+	return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static void choke_destroy(struct Qdisc *sch)
+{
+	struct choke_sched_data *q = qdisc_priv(sch);
+
+	tcf_destroy_chain(&q->filter_list);
+	choke_free(q->tab);
+}
+
+static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg)
+{
+	return NULL;
+}
+
+static unsigned long choke_get(struct Qdisc *sch, u32 classid)
+{
+	return 0;
+}
+
+static void choke_put(struct Qdisc *q, unsigned long cl)
+{
+}
+
+static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
+				u32 classid)
+{
+	return 0;
+}
+
+static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl)
+{
+	struct choke_sched_data *q = qdisc_priv(sch);
+
+	if (cl)
+		return NULL;
+	return &q->filter_list;
+}
+
+static int choke_dump_class(struct Qdisc *sch, unsigned long cl,
+			  struct sk_buff *skb, struct tcmsg *tcm)
+{
+	tcm->tcm_handle |= TC_H_MIN(cl);
+	return 0;
+}
+
+static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+	if (!arg->stop) {
+		if (arg->fn(sch, 1, arg) < 0) {
+			arg->stop = 1;
+			return;
+		}
+		arg->count++;
+	}
+}
+
+static const struct Qdisc_class_ops choke_class_ops = {
+	.leaf		=	choke_leaf,
+	.get		=	choke_get,
+	.put		=	choke_put,
+	.tcf_chain	=	choke_find_tcf,
+	.bind_tcf	=	choke_bind,
+	.unbind_tcf	=	choke_put,
+	.dump		=	choke_dump_class,
+	.walk		=	choke_walk,
+};
+
+static struct sk_buff *choke_peek_head(struct Qdisc *sch)
+{
+	struct choke_sched_data *q = qdisc_priv(sch);
+
+	return (q->head != q->tail) ? q->tab[q->head] : NULL;
+}
+
+static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
+	.id		=	"choke",
+	.priv_size	=	sizeof(struct choke_sched_data),
+
+	.enqueue	=	choke_enqueue,
+	.dequeue	=	choke_dequeue,
+	.peek		=	choke_peek_head,
+	.drop		=	choke_drop,
+	.init		=	choke_init,
+	.destroy	=	choke_destroy,
+	.reset		=	choke_reset,
+	.change		=	choke_change,
+	.dump		=	choke_dump,
+	.dump_stats	=	choke_dump_stats,
+	.owner		=	THIS_MODULE,
+};
+
+static int __init choke_module_init(void)
+{
+	return register_qdisc(&choke_qdisc_ops);
+}
+
+static void __exit choke_module_exit(void)
+{
+	unregister_qdisc(&choke_qdisc_ops);
+}
+
+module_init(choke_module_init)
+module_exit(choke_module_exit)
+
+MODULE_LICENSE("GPL");
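
To make the enqueue policy from the header comment of sch_choke.c concrete, here is a minimal user-space sketch of the same decision. Integer flow ids, rand() and the fixed qavg/qmin/qmax values are illustrative stand-ins for skb flow matching, random_N() and the RED average; the hard queue limit and hole-skipping on dequeue are omitted.

/*
 * Toy model of the CHOKe admit/drop decision: once the averaged queue
 * length passes qmin, a random victim is compared with the arriving
 * packet and both are dropped on a flow match; past qmax everything is
 * dropped.  Not kernel code; plain ints stand in for sk_buffs.
 */
#include <stdbool.h>
#include <stdlib.h>

#define TAB_SIZE 256				/* power of two, like the qdisc's table */

struct toy_choke {
	int		flow[TAB_SIZE];		/* flow id per slot, -1 marks a hole */
	unsigned int	head, tail;		/* ring indices, masked by TAB_SIZE - 1 */
	unsigned int	qavg, qmin, qmax;	/* stand-in for the RED state */
};

static unsigned int toy_len(const struct toy_choke *q)
{
	return (q->tail - q->head) & (TAB_SIZE - 1);
}

/* Returns true if a new packet of flow @flow may be queued. */
static bool toy_choke_admit(struct toy_choke *q, int flow)
{
	if (q->qavg <= q->qmin)			/* queue is small: always admit */
		goto admit;

	if (toy_len(q)) {			/* draw a victim at random */
		unsigned int idx = (q->head + rand() % toy_len(q)) & (TAB_SIZE - 1);

		if (q->flow[idx] == flow) {
			q->flow[idx] = -1;	/* drop the victim, leave a hole */
			return false;		/* and drop the arriving packet */
		}
	}

	if (q->qavg > q->qmax)			/* queue is large: drop (or ECN-mark) */
		return false;

admit:
	q->flow[q->tail] = flow;
	q->tail = (q->tail + 1) & (TAB_SIZE - 1);
	return true;
}

int main(void)
{
	struct toy_choke q = { .qmin = 5, .qmax = 15, .qavg = 10 };
	int i;

	for (i = 0; i < 8; i++)			/* a bursty flow gets thinned out */
		toy_choke_admit(&q, 1);
	return 0;
}

The real qdisc sizes its packet table with roundup_pow_of_two(ctl->limit + 1): a configured limit of 1000 packets, for instance, yields a 1024-entry array and tab_mask 0x3ff, which is what makes the masked ring arithmetic above (and in choke_len()/choke_zap_*_holes()) work.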
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index de55e64..6b7fe4a 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -376,7 +376,6 @@
 	}
 
 	bstats_update(&cl->bstats, skb);
-	qdisc_bstats_update(sch, skb);
 
 	sch->q.qlen++;
 	return err;
@@ -403,6 +402,7 @@
 			skb = qdisc_dequeue_peeked(cl->qdisc);
 			if (cl->qdisc->q.qlen == 0)
 				list_del(&cl->alist);
+			qdisc_bstats_update(sch, skb);
 			sch->q.qlen--;
 			return skb;
 		}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 60f4bdd..2c79020 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -137,10 +137,10 @@
 		mask = nla_get_u8(tb[TCA_DSMARK_MASK]);
 
 	if (tb[TCA_DSMARK_VALUE])
-		p->value[*arg-1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);
+		p->value[*arg - 1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);
 
 	if (tb[TCA_DSMARK_MASK])
-		p->mask[*arg-1] = mask;
+		p->mask[*arg - 1] = mask;
 
 	err = 0;
 
@@ -155,8 +155,8 @@
 	if (!dsmark_valid_index(p, arg))
 		return -EINVAL;
 
-	p->mask[arg-1] = 0xff;
-	p->value[arg-1] = 0;
+	p->mask[arg - 1] = 0xff;
+	p->value[arg - 1] = 0;
 
 	return 0;
 }
@@ -175,7 +175,7 @@
 		if (p->mask[i] == 0xff && !p->value[i])
 			goto ignore;
 		if (walker->count >= walker->skip) {
-			if (walker->fn(sch, i+1, walker) < 0) {
+			if (walker->fn(sch, i + 1, walker) < 0) {
 				walker->stop = 1;
 				break;
 			}
@@ -260,7 +260,6 @@
 		return err;
 	}
 
-	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
@@ -283,6 +282,7 @@
 	if (skb == NULL)
 		return NULL;
 
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen--;
 
 	index = skb->tc_index & (p->indices - 1);
@@ -304,9 +304,8 @@
 		 * and don't need yet another qdisc as a bypass.
 		 */
 		if (p->mask[index] != 0xff || p->value[index])
-			printk(KERN_WARNING
-			       "dsmark_dequeue: unsupported protocol %d\n",
-			       ntohs(skb->protocol));
+			pr_warning("dsmark_dequeue: unsupported protocol %d\n",
+				   ntohs(skb->protocol));
 		break;
 	}
 
@@ -424,14 +423,14 @@
 	if (!dsmark_valid_index(p, cl))
 		return -EINVAL;
 
-	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl-1);
+	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
 	tcm->tcm_info = p->q->handle;
 
 	opts = nla_nest_start(skb, TCA_OPTIONS);
 	if (opts == NULL)
 		goto nla_put_failure;
-	NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl-1]);
-	NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl-1]);
+	NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]);
+	NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]);
 
 	return nla_nest_end(skb, opts);
 
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index aa4d633..be33f9d 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -19,12 +19,11 @@
 
 /* 1 band FIFO pseudo-"scheduler" */
 
-struct fifo_sched_data
-{
+struct fifo_sched_data {
 	u32 limit;
 };
 
-static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
@@ -34,7 +33,7 @@
 	return qdisc_reshape_fail(skb, sch);
 }
 
-static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
@@ -44,19 +43,16 @@
 	return qdisc_reshape_fail(skb, sch);
 }
 
-static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-	struct sk_buff *skb_head;
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
 	if (likely(skb_queue_len(&sch->q) < q->limit))
 		return qdisc_enqueue_tail(skb, sch);
 
 	/* queue full, remove one skb to fulfill the limit */
-	skb_head = qdisc_dequeue_head(sch);
+	__qdisc_queue_drop_head(sch, &sch->q);
 	sch->qstats.drops++;
-	kfree_skb(skb_head);
-
 	qdisc_enqueue_tail(skb, sch);
 
 	return NET_XMIT_CN;
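
The head-drop behaviour above in one self-contained toy: when the bounded queue is full, the oldest entry is discarded so the arriving one can still be queued, and the caller is told the queue is congested (the qdisc's NET_XMIT_CN). Plain ints stand in for sk_buffs.

/* Toy bounded FIFO with head-drop; not kernel code. */
#include <stdio.h>

#define LIMIT 4

static int ring[LIMIT];
static unsigned int head, count;

/* Returns 1 when the oldest entry had to be dropped (~ NET_XMIT_CN). */
static int head_drop_enqueue(int pkt)
{
	int congested = 0;

	if (count == LIMIT) {		/* full: drop the head, keep the new packet */
		head = (head + 1) % LIMIT;
		count--;
		congested = 1;
	}
	ring[(head + count) % LIMIT] = pkt;
	count++;
	return congested;
}

int main(void)
{
	int i;

	for (i = 0; i < 6; i++)
		printf("enqueue %d -> %s\n", i, head_drop_enqueue(i) ? "CN" : "ok");
	return 0;
}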
@@ -65,11 +61,13 @@
 static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct fifo_sched_data *q = qdisc_priv(sch);
+	bool bypass;
+	bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
 
 	if (opt == NULL) {
 		u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
 
-		if (sch->ops == &bfifo_qdisc_ops)
+		if (is_bfifo)
 			limit *= psched_mtu(qdisc_dev(sch));
 
 		q->limit = limit;
@@ -82,6 +80,15 @@
 		q->limit = ctl->limit;
 	}
 
+	if (is_bfifo)
+		bypass = q->limit >= psched_mtu(qdisc_dev(sch));
+	else
+		bypass = q->limit >= 1;
+
+	if (bypass)
+		sch->flags |= TCQ_F_CAN_BYPASS;
+	else
+		sch->flags &= ~TCQ_F_CAN_BYPASS;
 	return 0;
 }
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 34dc598..0da09d5 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -87,8 +87,8 @@
 		 */
 		kfree_skb(skb);
 		if (net_ratelimit())
-			printk(KERN_WARNING "Dead loop on netdevice %s, "
-			       "fix it urgently!\n", dev_queue->dev->name);
+			pr_warning("Dead loop on netdevice %s, fix it urgently!\n",
+				   dev_queue->dev->name);
 		ret = qdisc_qlen(q);
 	} else {
 		/*
@@ -137,8 +137,8 @@
 	} else {
 		/* Driver returned NETDEV_TX_BUSY - requeue skb */
 		if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
-			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
-			       dev->name, ret, q->q.qlen);
+			pr_warning("BUG %s code %d qlen %d\n",
+				   dev->name, ret, q->q.qlen);
 
 		ret = dev_requeue_skb(skb, q);
 	}
@@ -412,8 +412,9 @@
 };
 
 
-static const u8 prio2band[TC_PRIO_MAX+1] =
-	{ 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };
+static const u8 prio2band[TC_PRIO_MAX + 1] = {
+	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
+};
 
 /* 3-band FIFO queue: old style, but should be a bit faster than
    generic prio+fifo combination.
@@ -445,7 +446,7 @@
 	return priv->q + band;
 }
 
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
 {
 	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
 		int band = prio2band[skb->priority & TC_PRIO_MAX];
@@ -460,7 +461,7 @@
 	return qdisc_drop(skb, qdisc);
 }
 
-static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
 {
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 	int band = bitmap2band[priv->bitmap];
@@ -479,7 +480,7 @@
 	return NULL;
 }
 
-static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
+static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
 {
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 	int band = bitmap2band[priv->bitmap];
@@ -493,7 +494,7 @@
 	return NULL;
 }
 
-static void pfifo_fast_reset(struct Qdisc* qdisc)
+static void pfifo_fast_reset(struct Qdisc *qdisc)
 {
 	int prio;
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
@@ -510,7 +511,7 @@
 {
 	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
 
-	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
+	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
 	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
 	return skb->len;
 
@@ -526,6 +527,8 @@
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
 		skb_queue_head_init(band2list(priv, prio));
 
+	/* Can by-pass the queue discipline */
+	qdisc->flags |= TCQ_F_CAN_BYPASS;
 	return 0;
 }
 
@@ -540,6 +543,7 @@
 	.dump		=	pfifo_fast_dump,
 	.owner		=	THIS_MODULE,
 };
+EXPORT_SYMBOL(pfifo_fast_ops);
 
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 			  struct Qdisc_ops *ops)
@@ -630,7 +634,7 @@
 #ifdef CONFIG_NET_SCHED
 	qdisc_list_del(qdisc);
 
-	qdisc_put_stab(qdisc->stab);
+	qdisc_put_stab(rtnl_dereference(qdisc->stab));
 #endif
 	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
 	if (ops->reset)
@@ -674,25 +678,21 @@
 
 	return oqdisc;
 }
+EXPORT_SYMBOL(dev_graft_qdisc);
 
 static void attach_one_default_qdisc(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
 				     void *_unused)
 {
-	struct Qdisc *qdisc;
+	struct Qdisc *qdisc = &noqueue_qdisc;
 
 	if (dev->tx_queue_len) {
 		qdisc = qdisc_create_dflt(dev_queue,
 					  &pfifo_fast_ops, TC_H_ROOT);
 		if (!qdisc) {
-			printk(KERN_INFO "%s: activation failed\n", dev->name);
+			netdev_info(dev, "activation failed\n");
 			return;
 		}
-
-		/* Can by-pass the queue discipline for default qdisc */
-		qdisc->flags |= TCQ_F_CAN_BYPASS;
-	} else {
-		qdisc =  &noqueue_qdisc;
 	}
 	dev_queue->qdisc_sleeping = qdisc;
 }
@@ -761,6 +761,7 @@
 		dev_watchdog_up(dev);
 	}
 }
+EXPORT_SYMBOL(dev_activate);
 
 static void dev_deactivate_queue(struct net_device *dev,
 				 struct netdev_queue *dev_queue,
@@ -840,6 +841,7 @@
 	list_add(&dev->unreg_list, &single);
 	dev_deactivate_many(&single);
 }
+EXPORT_SYMBOL(dev_deactivate);
 
 static void dev_init_scheduler_queue(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 51dcc2a..b9493a0 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -32,8 +32,7 @@
 struct gred_sched_data;
 struct gred_sched;
 
-struct gred_sched_data
-{
+struct gred_sched_data {
 	u32		limit;		/* HARD maximal queue length	*/
 	u32      	DP;		/* the drop pramaters */
 	u32		bytesin;	/* bytes seen on virtualQ so far*/
@@ -50,8 +49,7 @@
 	GRED_RIO_MODE,
 };
 
-struct gred_sched
-{
+struct gred_sched {
 	struct gred_sched_data *tab[MAX_DPs];
 	unsigned long	flags;
 	u32		red_flags;
@@ -150,17 +148,18 @@
 	return t->red_flags & TC_RED_HARDDROP;
 }
 
-static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-	struct gred_sched_data *q=NULL;
-	struct gred_sched *t= qdisc_priv(sch);
+	struct gred_sched_data *q = NULL;
+	struct gred_sched *t = qdisc_priv(sch);
 	unsigned long qavg = 0;
 	u16 dp = tc_index_to_dp(skb);
 
-	if (dp >= t->DPs  || (q = t->tab[dp]) == NULL) {
+	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
 		dp = t->def;
 
-		if ((q = t->tab[dp]) == NULL) {
+		q = t->tab[dp];
+		if (!q) {
 			/* Pass through packets not assigned to a DP
 			 * if no default DP has been configured. This
 			 * allows for DP flows to be left untouched.
@@ -183,7 +182,7 @@
 		for (i = 0; i < t->DPs; i++) {
 			if (t->tab[i] && t->tab[i]->prio < q->prio &&
 			    !red_is_idling(&t->tab[i]->parms))
-				qavg +=t->tab[i]->parms.qavg;
+				qavg += t->tab[i]->parms.qavg;
 		}
 
 	}
@@ -203,28 +202,28 @@
 		gred_store_wred_set(t, q);
 
 	switch (red_action(&q->parms, q->parms.qavg + qavg)) {
-		case RED_DONT_MARK:
-			break;
+	case RED_DONT_MARK:
+		break;
 
-		case RED_PROB_MARK:
-			sch->qstats.overlimits++;
-			if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
-				q->stats.prob_drop++;
-				goto congestion_drop;
-			}
+	case RED_PROB_MARK:
+		sch->qstats.overlimits++;
+		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+			q->stats.prob_drop++;
+			goto congestion_drop;
+		}
 
-			q->stats.prob_mark++;
-			break;
+		q->stats.prob_mark++;
+		break;
 
-		case RED_HARD_MARK:
-			sch->qstats.overlimits++;
-			if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
-			    !INET_ECN_set_ce(skb)) {
-				q->stats.forced_drop++;
-				goto congestion_drop;
-			}
-			q->stats.forced_mark++;
-			break;
+	case RED_HARD_MARK:
+		sch->qstats.overlimits++;
+		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
+		    !INET_ECN_set_ce(skb)) {
+			q->stats.forced_drop++;
+			goto congestion_drop;
+		}
+		q->stats.forced_mark++;
+		break;
 	}
 
 	if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
@@ -241,7 +240,7 @@
 	return NET_XMIT_CN;
 }
 
-static struct sk_buff *gred_dequeue(struct Qdisc* sch)
+static struct sk_buff *gred_dequeue(struct Qdisc *sch)
 {
 	struct sk_buff *skb;
 	struct gred_sched *t = qdisc_priv(sch);
@@ -254,9 +253,9 @@
 
 		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
 			if (net_ratelimit())
-				printk(KERN_WARNING "GRED: Unable to relocate "
-				       "VQ 0x%x after dequeue, screwing up "
-				       "backlog.\n", tc_index_to_dp(skb));
+				pr_warning("GRED: Unable to relocate VQ 0x%x "
+					   "after dequeue, screwing up "
+					   "backlog.\n", tc_index_to_dp(skb));
 		} else {
 			q->backlog -= qdisc_pkt_len(skb);
 
@@ -273,7 +272,7 @@
 	return NULL;
 }
 
-static unsigned int gred_drop(struct Qdisc* sch)
+static unsigned int gred_drop(struct Qdisc *sch)
 {
 	struct sk_buff *skb;
 	struct gred_sched *t = qdisc_priv(sch);
@@ -286,9 +285,9 @@
 
 		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
 			if (net_ratelimit())
-				printk(KERN_WARNING "GRED: Unable to relocate "
-				       "VQ 0x%x while dropping, screwing up "
-				       "backlog.\n", tc_index_to_dp(skb));
+				pr_warning("GRED: Unable to relocate VQ 0x%x "
+					   "while dropping, screwing up "
+					   "backlog.\n", tc_index_to_dp(skb));
 		} else {
 			q->backlog -= len;
 			q->stats.other++;
@@ -308,7 +307,7 @@
 
 }
 
-static void gred_reset(struct Qdisc* sch)
+static void gred_reset(struct Qdisc *sch)
 {
 	int i;
 	struct gred_sched *t = qdisc_priv(sch);
@@ -369,8 +368,8 @@
 
 	for (i = table->DPs; i < MAX_DPs; i++) {
 		if (table->tab[i]) {
-			printk(KERN_WARNING "GRED: Warning: Destroying "
-			       "shadowed VQ 0x%x\n", i);
+			pr_warning("GRED: Warning: Destroying "
+				   "shadowed VQ 0x%x\n", i);
 			gred_destroy_vq(table->tab[i]);
 			table->tab[i] = NULL;
 		}
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 2e45791..6488e64 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -81,8 +81,7 @@
  *   that are expensive on 32-bit architectures.
  */
 
-struct internal_sc
-{
+struct internal_sc {
 	u64	sm1;	/* scaled slope of the 1st segment */
 	u64	ism1;	/* scaled inverse-slope of the 1st segment */
 	u64	dx;	/* the x-projection of the 1st segment */
@@ -92,8 +91,7 @@
 };
 
 /* runtime service curve */
-struct runtime_sc
-{
+struct runtime_sc {
 	u64	x;	/* current starting position on x-axis */
 	u64	y;	/* current starting position on y-axis */
 	u64	sm1;	/* scaled slope of the 1st segment */
@@ -104,15 +102,13 @@
 	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
 };
 
-enum hfsc_class_flags
-{
+enum hfsc_class_flags {
 	HFSC_RSC = 0x1,
 	HFSC_FSC = 0x2,
 	HFSC_USC = 0x4
 };
 
-struct hfsc_class
-{
+struct hfsc_class {
 	struct Qdisc_class_common cl_common;
 	unsigned int	refcnt;		/* usage count */
 
@@ -140,8 +136,8 @@
 	u64	cl_cumul;		/* cumulative work in bytes done by
 					   real-time criteria */
 
-	u64 	cl_d;			/* deadline*/
-	u64 	cl_e;			/* eligible time */
+	u64	cl_d;			/* deadline*/
+	u64	cl_e;			/* eligible time */
 	u64	cl_vt;			/* virtual time */
 	u64	cl_f;			/* time when this class will fit for
 					   link-sharing, max(myf, cfmin) */
@@ -176,8 +172,7 @@
 	unsigned long	cl_nactive;	/* number of active children */
 };
 
-struct hfsc_sched
-{
+struct hfsc_sched {
 	u16	defcls;				/* default class id */
 	struct hfsc_class root;			/* root class */
 	struct Qdisc_class_hash clhash;		/* class hash */
@@ -693,7 +688,7 @@
 		if (go_active) {
 			n = rb_last(&cl->cl_parent->vt_tree);
 			if (n != NULL) {
-				max_cl = rb_entry(n, struct hfsc_class,vt_node);
+				max_cl = rb_entry(n, struct hfsc_class, vt_node);
 				/*
 				 * set vt to the average of the min and max
 				 * classes.  if the parent's period didn't
@@ -1177,8 +1172,10 @@
 			return NULL;
 		}
 #endif
-		if ((cl = (struct hfsc_class *)res.class) == NULL) {
-			if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
+		cl = (struct hfsc_class *)res.class;
+		if (!cl) {
+			cl = hfsc_find_class(res.classid, sch);
+			if (!cl)
 				break; /* filter selected invalid classid */
 			if (cl->level >= head->level)
 				break; /* filter may only point downwards */
@@ -1316,7 +1313,7 @@
 	return -1;
 }
 
-static inline int
+static int
 hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
 {
 	if ((cl->cl_flags & HFSC_RSC) &&
@@ -1420,7 +1417,8 @@
 	struct hfsc_class *cl;
 	u64 next_time = 0;
 
-	if ((cl = eltree_get_minel(q)) != NULL)
+	cl = eltree_get_minel(q);
+	if (cl)
 		next_time = cl->cl_e;
 	if (q->root.cl_cfmin != 0) {
 		if (next_time == 0 || next_time > q->root.cl_cfmin)
@@ -1600,7 +1598,6 @@
 		set_active(cl, qdisc_pkt_len(skb));
 
 	bstats_update(&cl->bstats, skb);
-	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
@@ -1626,7 +1623,8 @@
 	 * find the class with the minimum deadline among
 	 * the eligible classes.
 	 */
-	if ((cl = eltree_get_mindl(q, cur_time)) != NULL) {
+	cl = eltree_get_mindl(q, cur_time);
+	if (cl) {
 		realtime = 1;
 	} else {
 		/*
@@ -1665,7 +1663,8 @@
 		set_passive(cl);
 	}
 
-	sch->flags &= ~TCQ_F_THROTTLED;
+	qdisc_unthrottled(sch);
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen--;
 
 	return skb;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 984c1b0..e1429a8 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -99,9 +99,10 @@
 			struct rb_root feed[TC_HTB_NUMPRIO];	/* feed trees */
 			struct rb_node *ptr[TC_HTB_NUMPRIO];	/* current class ptr */
 			/* When class changes from state 1->2 and disconnects from
-			   parent's feed then we lost ptr value and start from the
-			   first child again. Here we store classid of the
-			   last valid ptr (used when ptr is NULL). */
+			 * parent's feed then we lost ptr value and start from the
+			 * first child again. Here we store classid of the
+			 * last valid ptr (used when ptr is NULL).
+			 */
 			u32 last_ptr_id[TC_HTB_NUMPRIO];
 		} inner;
 	} un;
@@ -185,7 +186,7 @@
  * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull
  * then finish and return direct queue.
  */
-#define HTB_DIRECT (struct htb_class*)-1
+#define HTB_DIRECT ((struct htb_class *)-1L)
 
 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 				      int *qerr)
@@ -197,11 +198,13 @@
 	int result;
 
 	/* allow to select class by setting skb->priority to valid classid;
-	   note that nfmark can be used too by attaching filter fw with no
-	   rules in it */
+	 * note that nfmark can be used too by attaching filter fw with no
+	 * rules in it
+	 */
 	if (skb->priority == sch->handle)
 		return HTB_DIRECT;	/* X:0 (direct flow) selected */
-	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
+	cl = htb_find(skb->priority, sch);
+	if (cl && cl->level == 0)
 		return cl;
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
@@ -216,10 +219,12 @@
 			return NULL;
 		}
 #endif
-		if ((cl = (void *)res.class) == NULL) {
+		cl = (void *)res.class;
+		if (!cl) {
 			if (res.classid == sch->handle)
 				return HTB_DIRECT;	/* X:0 (direct flow) */
-			if ((cl = htb_find(res.classid, sch)) == NULL)
+			cl = htb_find(res.classid, sch);
+			if (!cl)
 				break;	/* filter selected invalid classid */
 		}
 		if (!cl->level)
@@ -378,7 +383,8 @@
 
 			if (p->un.inner.feed[prio].rb_node)
 				/* parent already has its feed in use so that
-				   reset bit in mask as parent is already ok */
+				 * reset bit in mask as parent is already ok
+				 */
 				mask &= ~(1 << prio);
 
 			htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
@@ -413,8 +419,9 @@
 
 			if (p->un.inner.ptr[prio] == cl->node + prio) {
 				/* we are removing child which is pointed to from
-				   parent feed - forget the pointer but remember
-				   classid */
+				 * parent feed - forget the pointer but remember
+				 * classid
+				 */
 				p->un.inner.last_ptr_id[prio] = cl->common.classid;
 				p->un.inner.ptr[prio] = NULL;
 			}
@@ -574,7 +581,6 @@
 	}
 
 	sch->q.qlen++;
-	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
 
@@ -664,8 +670,9 @@
 				   unsigned long start)
 {
 	/* don't run for longer than 2 jiffies; 2 is used instead of
-	   1 to simplify things when jiffy is going to be incremented
-	   too soon */
+	 * 1 to simplify things when jiffy is going to be incremented
+	 * too soon
+	 */
 	unsigned long stop_at = start + 2;
 	while (time_before(jiffies, stop_at)) {
 		struct htb_class *cl;
@@ -688,7 +695,7 @@
 
 	/* too much load - let's continue after a break for scheduling */
 	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
-		printk(KERN_WARNING "htb: too many events!\n");
+		pr_warning("htb: too many events!\n");
 		q->warned |= HTB_WARN_TOOMANYEVENTS;
 	}
 
@@ -696,7 +703,8 @@
 }
 
 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
-   is no such one exists. */
+ * is no such one exists.
+ */
 static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
 					      u32 id)
 {
@@ -740,12 +748,14 @@
 	for (i = 0; i < 65535; i++) {
 		if (!*sp->pptr && *sp->pid) {
 			/* ptr was invalidated but id is valid - try to recover
-			   the original or next ptr */
+			 * the original or next ptr
+			 */
 			*sp->pptr =
 			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
 		}
 		*sp->pid = 0;	/* ptr is valid now so that remove this hint as it
-				   can become out of date quickly */
+				 * can become out of date quickly
+				 */
 		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
 			*sp->pptr = sp->root;
 			while ((*sp->pptr)->rb_left)
@@ -773,7 +783,8 @@
 }
 
 /* dequeues packet at given priority and level; call only if
-   you are sure that there is active class at prio/level */
+ * you are sure that there is active class at prio/level
+ */
 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
 					int level)
 {
@@ -790,9 +801,10 @@
 			return NULL;
 
 		/* class can be empty - it is unlikely but can be true if leaf
-		   qdisc drops packets in enqueue routine or if someone used
-		   graft operation on the leaf since last dequeue;
-		   simply deactivate and skip such class */
+		 * qdisc drops packets in enqueue routine or if someone used
+		 * graft operation on the leaf since last dequeue;
+		 * simply deactivate and skip such class
+		 */
 		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
 			struct htb_class *next;
 			htb_deactivate(q, cl);
@@ -832,7 +844,8 @@
 					  ptr[0]) + prio);
 		}
 		/* this used to be after charge_class but this constelation
-		   gives us slightly better performance */
+		 * gives us slightly better performance
+		 */
 		if (!cl->un.leaf.q->q.qlen)
 			htb_deactivate(q, cl);
 		htb_charge_class(q, cl, level, skb);
@@ -842,7 +855,7 @@
 
 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 	struct htb_sched *q = qdisc_priv(sch);
 	int level;
 	psched_time_t next_event;
@@ -851,7 +864,9 @@
 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 	skb = __skb_dequeue(&q->direct_queue);
 	if (skb != NULL) {
-		sch->flags &= ~TCQ_F_THROTTLED;
+ok:
+		qdisc_bstats_update(sch, skb);
+		qdisc_unthrottled(sch);
 		sch->q.qlen--;
 		return skb;
 	}
@@ -882,13 +897,11 @@
 		m = ~q->row_mask[level];
 		while (m != (int)(-1)) {
 			int prio = ffz(m);
+
 			m |= 1 << prio;
 			skb = htb_dequeue_tree(q, prio, level);
-			if (likely(skb != NULL)) {
-				sch->q.qlen--;
-				sch->flags &= ~TCQ_F_THROTTLED;
-				goto fin;
-			}
+			if (likely(skb != NULL))
+				goto ok;
 		}
 	}
 	sch->qstats.overlimits++;
@@ -989,13 +1002,12 @@
 		return err;
 
 	if (tb[TCA_HTB_INIT] == NULL) {
-		printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
+		pr_err("HTB: hey probably you have bad tc tool ?\n");
 		return -EINVAL;
 	}
 	gopt = nla_data(tb[TCA_HTB_INIT]);
 	if (gopt->version != HTB_VER >> 16) {
-		printk(KERN_ERR
-		       "HTB: need tc/htb version %d (minor is %d), you have %d\n",
+		pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",
 		       HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
 		return -EINVAL;
 	}
@@ -1208,9 +1220,10 @@
 	cancel_work_sync(&q->work);
 	qdisc_watchdog_cancel(&q->watchdog);
 	/* This line used to be after htb_destroy_class call below
-	   and surprisingly it worked in 2.4. But it must precede it
-	   because filter need its target class alive to be able to call
-	   unbind_filter on it (without Oops). */
+	 * and surprisingly it worked in 2.4. But it must precede it
+	 * because filters need their target class alive to be able to call
+	 * unbind_filter on it (without Oops).
+	 */
 	tcf_destroy_chain(&q->filter_list);
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
@@ -1344,11 +1357,12 @@
 
 		/* check maximal depth */
 		if (parent && parent->parent && parent->parent->level < 2) {
-			printk(KERN_ERR "htb: tree is too deep\n");
+			pr_err("htb: tree is too deep\n");
 			goto failure;
 		}
 		err = -ENOBUFS;
-		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
+		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
+		if (!cl)
 			goto failure;
 
 		err = gen_new_estimator(&cl->bstats, &cl->rate_est,
@@ -1368,8 +1382,9 @@
 			RB_CLEAR_NODE(&cl->node[prio]);
 
 		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
-		   so that can't be used inside of sch_tree_lock
-		   -- thanks to Karlis Peisenieks */
+		 * so that can't be used inside of sch_tree_lock
+		 * -- thanks to Karlis Peisenieks
+		 */
 		new_q = qdisc_create_dflt(sch->dev_queue,
 					  &pfifo_qdisc_ops, classid);
 		sch_tree_lock(sch);
@@ -1421,17 +1436,18 @@
 	}
 
 	/* it used to be a nasty bug here, we have to check that node
-	   is really leaf before changing cl->un.leaf ! */
+	 * is really a leaf before changing cl->un.leaf!
+	 */
 	if (!cl->level) {
 		cl->quantum = rtab->rate.rate / q->rate2quantum;
 		if (!hopt->quantum && cl->quantum < 1000) {
-			printk(KERN_WARNING
+			pr_warning(
 			       "HTB: quantum of class %X is small. Consider r2q change.\n",
 			       cl->common.classid);
 			cl->quantum = 1000;
 		}
 		if (!hopt->quantum && cl->quantum > 200000) {
-			printk(KERN_WARNING
+			pr_warning(
 			       "HTB: quantum of class %X is big. Consider r2q change.\n",
 			       cl->common.classid);
 			cl->quantum = 200000;
@@ -1480,13 +1496,13 @@
 	struct htb_class *cl = htb_find(classid, sch);
 
 	/*if (cl && !cl->level) return 0;
-	   The line above used to be there to prevent attaching filters to
-	   leaves. But at least tc_index filter uses this just to get class
-	   for other reasons so that we have to allow for it.
-	   ----
-	   19.6.2002 As Werner explained it is ok - bind filter is just
-	   another way to "lock" the class - unlike "get" this lock can
-	   be broken by class during destroy IIUC.
+	 * The line above used to be there to prevent attaching filters to
+	 * leaves. But at least tc_index filter uses this just to get class
+	 * for other reasons so that we have to allow for it.
+	 * ----
+	 * 19.6.2002 As Werner explained it is ok - bind filter is just
+	 * another way to "lock" the class - unlike "get" this lock can
+	 * be broken by class during destroy IIUC.
 	 */
 	if (cl)
 		cl->filter_cnt++;
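
The class-change hunk above derives each class's quantum from its rate divided by the r2q factor (when no explicit quantum is given) and clamps it into the 1000..200000 byte window that the now pr_warning messages describe. A minimal userspace sketch of that derivation follows, assuming the rate is expressed in bytes per second; htb_quantum() and the standalone program are illustrative only, not the kernel code.

/* Illustrative userspace sketch (assumption: rate in bytes/sec, r2q > 0);
 * mirrors only the clamping described by the warnings above. */
#include <stdio.h>

static unsigned int htb_quantum(unsigned int rate_bps, unsigned int r2q)
{
	unsigned int quantum = rate_bps / r2q;	/* bytes sent per round */

	if (quantum < 1000)	/* "quantum ... is small": clamp up */
		quantum = 1000;
	if (quantum > 200000)	/* "quantum ... is big": clamp down */
		quantum = 200000;
	return quantum;
}

int main(void)
{
	printf("%u\n", htb_quantum(125000, 10));	/* 1 Mbit/s, r2q=10 -> 12500 */
	printf("%u\n", htb_quantum(1000, 10));		/* 100 -> clamped to 1000 */
	return 0;
}
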
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index ecc302f..ec5cbc8 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -61,7 +61,6 @@
 						    TC_H_MIN(ntx + 1)));
 		if (qdisc == NULL)
 			goto err;
-		qdisc->flags |= TCQ_F_CAN_BYPASS;
 		priv->qdiscs[ntx] = qdisc;
 	}
 
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
new file mode 100644
index 0000000..ace37f9
--- /dev/null
+++ b/net/sched/sch_mqprio.c
@@ -0,0 +1,416 @@
+/*
+ * net/sched/sch_mqprio.c
+ *
+ * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/sch_generic.h>
+
+struct mqprio_sched {
+	struct Qdisc		**qdiscs;
+	int hw_owned;
+};
+
+static void mqprio_destroy(struct Qdisc *sch)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct mqprio_sched *priv = qdisc_priv(sch);
+	unsigned int ntx;
+
+	if (priv->qdiscs) {
+		for (ntx = 0;
+		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
+		     ntx++)
+			qdisc_destroy(priv->qdiscs[ntx]);
+		kfree(priv->qdiscs);
+	}
+
+	if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
+		dev->netdev_ops->ndo_setup_tc(dev, 0);
+	else
+		netdev_set_num_tc(dev, 0);
+}
+
+static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
+{
+	int i, j;
+
+	/* Verify num_tc is not out of max range */
+	if (qopt->num_tc > TC_MAX_QUEUE)
+		return -EINVAL;
+
+	/* Verify priority mapping uses valid tcs */
+	for (i = 0; i < TC_BITMASK + 1; i++) {
+		if (qopt->prio_tc_map[i] >= qopt->num_tc)
+			return -EINVAL;
+	}
+
+	/* net_device does not support requested operation */
+	if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
+		return -EINVAL;
+
+	/* If hw owned, qcount and qoffset are taken from the LLD, so
+	 * there is no reason to verify them here
+	 */
+	if (qopt->hw)
+		return 0;
+
+	for (i = 0; i < qopt->num_tc; i++) {
+		unsigned int last = qopt->offset[i] + qopt->count[i];
+
+		/* Verify the queue count is within the tx queue range; a count
+		 * reaching real_num_tx_queues means the last queue is in use.
+		 */
+		if (qopt->offset[i] >= dev->real_num_tx_queues ||
+		    !qopt->count[i] ||
+		    last > dev->real_num_tx_queues)
+			return -EINVAL;
+
+		/* Verify that the offset and counts do not overlap */
+		for (j = i + 1; j < qopt->num_tc; j++) {
+			if (last > qopt->offset[j])
+				return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct mqprio_sched *priv = qdisc_priv(sch);
+	struct netdev_queue *dev_queue;
+	struct Qdisc *qdisc;
+	int i, err = -EOPNOTSUPP;
+	struct tc_mqprio_qopt *qopt = NULL;
+
+	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
+	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);
+
+	if (sch->parent != TC_H_ROOT)
+		return -EOPNOTSUPP;
+
+	if (!netif_is_multiqueue(dev))
+		return -EOPNOTSUPP;
+
+	if (nla_len(opt) < sizeof(*qopt))
+		return -EINVAL;
+
+	qopt = nla_data(opt);
+	if (mqprio_parse_opt(dev, qopt))
+		return -EINVAL;
+
+	/* pre-allocate qdiscs so that attachment can't fail */
+	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
+			       GFP_KERNEL);
+	if (priv->qdiscs == NULL) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		dev_queue = netdev_get_tx_queue(dev, i);
+		qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
+					  TC_H_MAKE(TC_H_MAJ(sch->handle),
+						    TC_H_MIN(i + 1)));
+		if (qdisc == NULL) {
+			err = -ENOMEM;
+			goto err;
+		}
+		priv->qdiscs[i] = qdisc;
+	}
+
+	/* If the mqprio options indicate that hardware should own
+	 * the queue mapping, run ndo_setup_tc; otherwise use the
+	 * supplied and verified mapping
+	 */
+	if (qopt->hw) {
+		priv->hw_owned = 1;
+		err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
+		if (err)
+			goto err;
+	} else {
+		netdev_set_num_tc(dev, qopt->num_tc);
+		for (i = 0; i < qopt->num_tc; i++)
+			netdev_set_tc_queue(dev, i,
+					    qopt->count[i], qopt->offset[i]);
+	}
+
+	/* Always use supplied priority mappings */
+	for (i = 0; i < TC_BITMASK + 1; i++)
+		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);
+
+	sch->flags |= TCQ_F_MQROOT;
+	return 0;
+
+err:
+	mqprio_destroy(sch);
+	return err;
+}
+
+static void mqprio_attach(struct Qdisc *sch)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct mqprio_sched *priv = qdisc_priv(sch);
+	struct Qdisc *qdisc;
+	unsigned int ntx;
+
+	/* Attach underlying qdisc */
+	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+		qdisc = priv->qdiscs[ntx];
+		qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+		if (qdisc)
+			qdisc_destroy(qdisc);
+	}
+	kfree(priv->qdiscs);
+	priv->qdiscs = NULL;
+}
+
+static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
+					     unsigned long cl)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);
+
+	if (ntx >= dev->num_tx_queues)
+		return NULL;
+	return netdev_get_tx_queue(dev, ntx);
+}
+
+static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
+		    struct Qdisc **old)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+	if (!dev_queue)
+		return -EINVAL;
+
+	if (dev->flags & IFF_UP)
+		dev_deactivate(dev);
+
+	*old = dev_graft_qdisc(dev_queue, new);
+
+	if (dev->flags & IFF_UP)
+		dev_activate(dev);
+
+	return 0;
+}
+
+static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct mqprio_sched *priv = qdisc_priv(sch);
+	unsigned char *b = skb_tail_pointer(skb);
+	struct tc_mqprio_qopt opt = { 0 };
+	struct Qdisc *qdisc;
+	unsigned int i;
+
+	sch->q.qlen = 0;
+	memset(&sch->bstats, 0, sizeof(sch->bstats));
+	memset(&sch->qstats, 0, sizeof(sch->qstats));
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+		spin_lock_bh(qdisc_lock(qdisc));
+		sch->q.qlen		+= qdisc->q.qlen;
+		sch->bstats.bytes	+= qdisc->bstats.bytes;
+		sch->bstats.packets	+= qdisc->bstats.packets;
+		sch->qstats.qlen	+= qdisc->qstats.qlen;
+		sch->qstats.backlog	+= qdisc->qstats.backlog;
+		sch->qstats.drops	+= qdisc->qstats.drops;
+		sch->qstats.requeues	+= qdisc->qstats.requeues;
+		sch->qstats.overlimits	+= qdisc->qstats.overlimits;
+		spin_unlock_bh(qdisc_lock(qdisc));
+	}
+
+	opt.num_tc = netdev_get_num_tc(dev);
+	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
+	opt.hw = priv->hw_owned;
+
+	for (i = 0; i < netdev_get_num_tc(dev); i++) {
+		opt.count[i] = dev->tc_to_txq[i].count;
+		opt.offset[i] = dev->tc_to_txq[i].offset;
+	}
+
+	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+
+	return skb->len;
+nla_put_failure:
+	nlmsg_trim(skb, b);
+	return -1;
+}
+
+static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
+{
+	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+	if (!dev_queue)
+		return NULL;
+
+	return dev_queue->qdisc_sleeping;
+}
+
+static unsigned long mqprio_get(struct Qdisc *sch, u32 classid)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	unsigned int ntx = TC_H_MIN(classid);
+
+	if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
+		return 0;
+	return ntx;
+}
+
+static void mqprio_put(struct Qdisc *sch, unsigned long cl)
+{
+}
+
+static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
+			 struct sk_buff *skb, struct tcmsg *tcm)
+{
+	struct net_device *dev = qdisc_dev(sch);
+
+	if (cl <= netdev_get_num_tc(dev)) {
+		tcm->tcm_parent = TC_H_ROOT;
+		tcm->tcm_info = 0;
+	} else {
+		int i;
+		struct netdev_queue *dev_queue;
+
+		dev_queue = mqprio_queue_get(sch, cl);
+		tcm->tcm_parent = 0;
+		for (i = 0; i < netdev_get_num_tc(dev); i++) {
+			struct netdev_tc_txq tc = dev->tc_to_txq[i];
+			int q_idx = cl - netdev_get_num_tc(dev);
+
+			if (q_idx > tc.offset &&
+			    q_idx <= tc.offset + tc.count) {
+				tcm->tcm_parent =
+					TC_H_MAKE(TC_H_MAJ(sch->handle),
+						  TC_H_MIN(i + 1));
+				break;
+			}
+		}
+		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+	}
+	tcm->tcm_handle |= TC_H_MIN(cl);
+	return 0;
+}
+
+static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+			       struct gnet_dump *d)
+{
+	struct net_device *dev = qdisc_dev(sch);
+
+	if (cl <= netdev_get_num_tc(dev)) {
+		int i;
+		struct Qdisc *qdisc;
+		struct gnet_stats_queue qstats = {0};
+		struct gnet_stats_basic_packed bstats = {0};
+		struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];
+
+		/* Drop the lock here; it will be reclaimed before touching
+		 * statistics. This is required because the d->lock held
+		 * here is the lock on dev_queue->qdisc_sleeping, which is
+		 * also acquired below.
+		 */
+		spin_unlock_bh(d->lock);
+
+		for (i = tc.offset; i < tc.offset + tc.count; i++) {
+			qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+			spin_lock_bh(qdisc_lock(qdisc));
+			bstats.bytes      += qdisc->bstats.bytes;
+			bstats.packets    += qdisc->bstats.packets;
+			qstats.qlen       += qdisc->qstats.qlen;
+			qstats.backlog    += qdisc->qstats.backlog;
+			qstats.drops      += qdisc->qstats.drops;
+			qstats.requeues   += qdisc->qstats.requeues;
+			qstats.overlimits += qdisc->qstats.overlimits;
+			spin_unlock_bh(qdisc_lock(qdisc));
+		}
+		/* Reclaim root sleeping lock before completing stats */
+		spin_lock_bh(d->lock);
+		if (gnet_stats_copy_basic(d, &bstats) < 0 ||
+		    gnet_stats_copy_queue(d, &qstats) < 0)
+			return -1;
+	} else {
+		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
+
+		sch = dev_queue->qdisc_sleeping;
+		sch->qstats.qlen = sch->q.qlen;
+		if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
+		    gnet_stats_copy_queue(d, &sch->qstats) < 0)
+			return -1;
+	}
+	return 0;
+}
+
+static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	unsigned long ntx;
+
+	if (arg->stop)
+		return;
+
+	/* Walk hierarchy with a virtual class per tc */
+	arg->count = arg->skip;
+	for (ntx = arg->skip;
+	     ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
+	     ntx++) {
+		if (arg->fn(sch, ntx + 1, arg) < 0) {
+			arg->stop = 1;
+			break;
+		}
+		arg->count++;
+	}
+}
+
+static const struct Qdisc_class_ops mqprio_class_ops = {
+	.graft		= mqprio_graft,
+	.leaf		= mqprio_leaf,
+	.get		= mqprio_get,
+	.put		= mqprio_put,
+	.walk		= mqprio_walk,
+	.dump		= mqprio_dump_class,
+	.dump_stats	= mqprio_dump_class_stats,
+};
+
+struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
+	.cl_ops		= &mqprio_class_ops,
+	.id		= "mqprio",
+	.priv_size	= sizeof(struct mqprio_sched),
+	.init		= mqprio_init,
+	.destroy	= mqprio_destroy,
+	.attach		= mqprio_attach,
+	.dump		= mqprio_dump,
+	.owner		= THIS_MODULE,
+};
+
+static int __init mqprio_module_init(void)
+{
+	return register_qdisc(&mqprio_qdisc_ops);
+}
+
+static void __exit mqprio_module_exit(void)
+{
+	unregister_qdisc(&mqprio_qdisc_ops);
+}
+
+module_init(mqprio_module_init);
+module_exit(mqprio_module_exit);
+
+MODULE_LICENSE("GPL");
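
The new sch_mqprio.c above maps each skb priority to a traffic class and each traffic class to a contiguous range of transmit queues; mqprio_parse_opt() rejects layouts whose per-class ranges are empty, overlap, or run past real_num_tx_queues. A minimal userspace sketch of that validation is shown below; struct toy_qopt and toy_parse_opt() are simplified stand-ins for struct tc_mqprio_qopt and the kernel function, not the kernel code itself.

/* Illustrative userspace sketch of the queue-range validation above. */
#include <stdio.h>

#define MAX_TC 16

struct toy_qopt {
	unsigned int num_tc;
	unsigned int count[MAX_TC];	/* queues per traffic class */
	unsigned int offset[MAX_TC];	/* first queue of each class */
};

/* Return 0 if every class maps to a non-empty, non-overlapping queue
 * range that fits inside num_tx_queues.  Like mqprio_parse_opt(), the
 * overlap check assumes classes are listed in increasing offset order. */
static int toy_parse_opt(const struct toy_qopt *q, unsigned int num_tx_queues)
{
	unsigned int i, j;

	for (i = 0; i < q->num_tc; i++) {
		unsigned int last = q->offset[i] + q->count[i];

		if (q->offset[i] >= num_tx_queues || !q->count[i] ||
		    last > num_tx_queues)
			return -1;
		for (j = i + 1; j < q->num_tc; j++)
			if (last > q->offset[j])	/* ranges overlap */
				return -1;
	}
	return 0;
}

int main(void)
{
	struct toy_qopt q = { .num_tc = 2,
			      .count  = { 2, 2 },
			      .offset = { 0, 2 } };	/* tc0 -> q0-1, tc1 -> q2-3 */

	printf("%d\n", toy_parse_opt(&q, 4));	/* 0: valid layout */
	return 0;
}
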
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 21f13da..edc1950 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,7 +83,6 @@
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
@@ -112,6 +111,7 @@
 			qdisc = q->queues[q->curband];
 			skb = qdisc->dequeue(qdisc);
 			if (skb) {
+				qdisc_bstats_update(sch, skb);
 				sch->q.qlen--;
 				return skb;
 			}
@@ -156,7 +156,7 @@
 	unsigned int len;
 	struct Qdisc *qdisc;
 
-	for (band = q->bands-1; band >= 0; band--) {
+	for (band = q->bands - 1; band >= 0; band--) {
 		qdisc = q->queues[band];
 		if (qdisc->ops->drop) {
 			len = qdisc->ops->drop(qdisc);
@@ -265,7 +265,7 @@
 	for (i = 0; i < q->max_bands; i++)
 		q->queues[i] = &noop_qdisc;
 
-	err = multiq_tune(sch,opt);
+	err = multiq_tune(sch, opt);
 
 	if (err)
 		kfree(q->queues);
@@ -346,7 +346,7 @@
 	struct multiq_sched_data *q = qdisc_priv(sch);
 
 	tcm->tcm_handle |= TC_H_MIN(cl);
-	tcm->tcm_info = q->queues[cl-1]->handle;
+	tcm->tcm_info = q->queues[cl - 1]->handle;
 	return 0;
 }
 
@@ -378,7 +378,7 @@
 			arg->count++;
 			continue;
 		}
-		if (arg->fn(sch, band+1, arg) < 0) {
+		if (arg->fn(sch, band + 1, arg) < 0) {
 			arg->stop = 1;
 			break;
 		}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 1c4bce8..64f0d32 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -211,8 +211,8 @@
 	}
 
 	cb = netem_skb_cb(skb);
-	if (q->gap == 0 || 		/* not doing reordering */
-	    q->counter < q->gap || 	/* inside last reordering gap */
+	if (q->gap == 0 ||		/* not doing reordering */
+	    q->counter < q->gap ||	/* inside last reordering gap */
 	    q->reorder < get_crandom(&q->reorder_cor)) {
 		psched_time_t now;
 		psched_tdiff_t delay;
@@ -240,7 +240,6 @@
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		sch->q.qlen++;
-		qdisc_bstats_update(sch, skb);
 	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 	}
@@ -249,7 +248,7 @@
 	return ret;
 }
 
-static unsigned int netem_drop(struct Qdisc* sch)
+static unsigned int netem_drop(struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
 	unsigned int len = 0;
@@ -266,7 +265,7 @@
 	struct netem_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
 
-	if (sch->flags & TCQ_F_THROTTLED)
+	if (qdisc_is_throttled(sch))
 		return NULL;
 
 	skb = q->qdisc->ops->peek(q->qdisc);
@@ -289,6 +288,7 @@
 				skb->tstamp.tv64 = 0;
 #endif
 			pr_debug("netem_dequeue: return skb=%p\n", skb);
+			qdisc_bstats_update(sch, skb);
 			sch->q.qlen--;
 			return skb;
 		}
@@ -476,7 +476,6 @@
 		__skb_queue_after(list, skb, nskb);
 
 		sch->qstats.backlog += qdisc_pkt_len(nskb);
-		qdisc_bstats_update(sch, nskb);
 
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 966158d..2a318f2 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -22,8 +22,7 @@
 #include <net/pkt_sched.h>
 
 
-struct prio_sched_data
-{
+struct prio_sched_data {
 	int bands;
 	struct tcf_proto *filter_list;
 	u8  prio2band[TC_PRIO_MAX+1];
@@ -54,7 +53,7 @@
 		if (!q->filter_list || err < 0) {
 			if (TC_H_MAJ(band))
 				band = 0;
-			return q->queues[q->prio2band[band&TC_PRIO_MAX]];
+			return q->queues[q->prio2band[band & TC_PRIO_MAX]];
 		}
 		band = res.classid;
 	}
@@ -84,7 +83,6 @@
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
@@ -107,7 +105,7 @@
 	return NULL;
 }
 
-static struct sk_buff *prio_dequeue(struct Qdisc* sch)
+static struct sk_buff *prio_dequeue(struct Qdisc *sch)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 	int prio;
@@ -116,6 +114,7 @@
 		struct Qdisc *qdisc = q->queues[prio];
 		struct sk_buff *skb = qdisc->dequeue(qdisc);
 		if (skb) {
+			qdisc_bstats_update(sch, skb);
 			sch->q.qlen--;
 			return skb;
 		}
@@ -124,7 +123,7 @@
 
 }
 
-static unsigned int prio_drop(struct Qdisc* sch)
+static unsigned int prio_drop(struct Qdisc *sch)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 	int prio;
@@ -143,24 +142,24 @@
 
 
 static void
-prio_reset(struct Qdisc* sch)
+prio_reset(struct Qdisc *sch)
 {
 	int prio;
 	struct prio_sched_data *q = qdisc_priv(sch);
 
-	for (prio=0; prio<q->bands; prio++)
+	for (prio = 0; prio < q->bands; prio++)
 		qdisc_reset(q->queues[prio]);
 	sch->q.qlen = 0;
 }
 
 static void
-prio_destroy(struct Qdisc* sch)
+prio_destroy(struct Qdisc *sch)
 {
 	int prio;
 	struct prio_sched_data *q = qdisc_priv(sch);
 
 	tcf_destroy_chain(&q->filter_list);
-	for (prio=0; prio<q->bands; prio++)
+	for (prio = 0; prio < q->bands; prio++)
 		qdisc_destroy(q->queues[prio]);
 }
 
@@ -177,7 +176,7 @@
 	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
 		return -EINVAL;
 
-	for (i=0; i<=TC_PRIO_MAX; i++) {
+	for (i = 0; i <= TC_PRIO_MAX; i++) {
 		if (qopt->priomap[i] >= qopt->bands)
 			return -EINVAL;
 	}
@@ -186,7 +185,7 @@
 	q->bands = qopt->bands;
 	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
-	for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
+	for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
 		struct Qdisc *child = q->queues[i];
 		q->queues[i] = &noop_qdisc;
 		if (child != &noop_qdisc) {
@@ -196,9 +195,10 @@
 	}
 	sch_tree_unlock(sch);
 
-	for (i=0; i<q->bands; i++) {
+	for (i = 0; i < q->bands; i++) {
 		if (q->queues[i] == &noop_qdisc) {
 			struct Qdisc *child, *old;
+
 			child = qdisc_create_dflt(sch->dev_queue,
 						  &pfifo_qdisc_ops,
 						  TC_H_MAKE(sch->handle, i + 1));
@@ -224,7 +224,7 @@
 	struct prio_sched_data *q = qdisc_priv(sch);
 	int i;
 
-	for (i=0; i<TCQ_PRIO_BANDS; i++)
+	for (i = 0; i < TCQ_PRIO_BANDS; i++)
 		q->queues[i] = &noop_qdisc;
 
 	if (opt == NULL) {
@@ -232,7 +232,7 @@
 	} else {
 		int err;
 
-		if ((err= prio_tune(sch, opt)) != 0)
+		if ((err = prio_tune(sch, opt)) != 0)
 			return err;
 	}
 	return 0;
@@ -245,7 +245,7 @@
 	struct tc_prio_qopt opt;
 
 	opt.bands = q->bands;
-	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);
+	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
 
 	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
 
@@ -342,7 +342,7 @@
 			arg->count++;
 			continue;
 		}
-		if (arg->fn(sch, prio+1, arg) < 0) {
+		if (arg->fn(sch, prio + 1, arg) < 0) {
 			arg->stop = 1;
 			break;
 		}
@@ -350,7 +350,7 @@
 	}
 }
 
-static struct tcf_proto ** prio_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a6009c5..6649463 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -36,8 +36,7 @@
 	if RED works correctly.
  */
 
-struct red_sched_data
-{
+struct red_sched_data {
 	u32			limit;		/* HARD maximal queue length */
 	unsigned char		flags;
 	struct red_parms	parms;
@@ -55,7 +54,7 @@
 	return q->flags & TC_RED_HARDDROP;
 }
 
-static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 	struct Qdisc *child = q->qdisc;
@@ -67,34 +66,33 @@
 		red_end_of_idle_period(&q->parms);
 
 	switch (red_action(&q->parms, q->parms.qavg)) {
-		case RED_DONT_MARK:
-			break;
+	case RED_DONT_MARK:
+		break;
 
-		case RED_PROB_MARK:
-			sch->qstats.overlimits++;
-			if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
-				q->stats.prob_drop++;
-				goto congestion_drop;
-			}
+	case RED_PROB_MARK:
+		sch->qstats.overlimits++;
+		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
+			q->stats.prob_drop++;
+			goto congestion_drop;
+		}
 
-			q->stats.prob_mark++;
-			break;
+		q->stats.prob_mark++;
+		break;
 
-		case RED_HARD_MARK:
-			sch->qstats.overlimits++;
-			if (red_use_harddrop(q) || !red_use_ecn(q) ||
-			    !INET_ECN_set_ce(skb)) {
-				q->stats.forced_drop++;
-				goto congestion_drop;
-			}
+	case RED_HARD_MARK:
+		sch->qstats.overlimits++;
+		if (red_use_harddrop(q) || !red_use_ecn(q) ||
+		    !INET_ECN_set_ce(skb)) {
+			q->stats.forced_drop++;
+			goto congestion_drop;
+		}
 
-			q->stats.forced_mark++;
-			break;
+		q->stats.forced_mark++;
+		break;
 	}
 
 	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
-		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
@@ -107,22 +105,24 @@
 	return NET_XMIT_CN;
 }
 
-static struct sk_buff * red_dequeue(struct Qdisc* sch)
+static struct sk_buff *red_dequeue(struct Qdisc *sch)
 {
 	struct sk_buff *skb;
 	struct red_sched_data *q = qdisc_priv(sch);
 	struct Qdisc *child = q->qdisc;
 
 	skb = child->dequeue(child);
-	if (skb)
+	if (skb) {
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen--;
-	else if (!red_is_idling(&q->parms))
-		red_start_of_idle_period(&q->parms);
-
+	} else {
+		if (!red_is_idling(&q->parms))
+			red_start_of_idle_period(&q->parms);
+	}
 	return skb;
 }
 
-static struct sk_buff * red_peek(struct Qdisc* sch)
+static struct sk_buff *red_peek(struct Qdisc *sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 	struct Qdisc *child = q->qdisc;
@@ -130,7 +130,7 @@
 	return child->ops->peek(child);
 }
 
-static unsigned int red_drop(struct Qdisc* sch)
+static unsigned int red_drop(struct Qdisc *sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 	struct Qdisc *child = q->qdisc;
@@ -149,7 +149,7 @@
 	return 0;
 }
 
-static void red_reset(struct Qdisc* sch)
+static void red_reset(struct Qdisc *sch)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
@@ -216,7 +216,7 @@
 	return 0;
 }
 
-static int red_init(struct Qdisc* sch, struct nlattr *opt)
+static int red_init(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 239ec53..c2e628d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -21,6 +21,7 @@
 #include <linux/skbuff.h>
 #include <linux/jhash.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <net/ip.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
@@ -76,7 +77,8 @@
 #define SFQ_DEPTH		128 /* max number of packets per flow */
 #define SFQ_SLOTS		128 /* max number of flows */
 #define SFQ_EMPTY_SLOT		255
-#define SFQ_HASH_DIVISOR	1024
+#define SFQ_DEFAULT_HASH_DIVISOR 1024
+
 /* We use 16 bits to store allot, and want to handle packets up to 64K
  * Scale allot by 8 (1<<3) so that no overflow occurs.
  */
@@ -92,8 +94,7 @@
  * while following values [SFQ_SLOTS ... SFQ_SLOTS + SFQ_DEPTH - 1]
  * are 'pointers' to dep[] array
  */
-struct sfq_head
-{
+struct sfq_head {
 	sfq_index	next;
 	sfq_index	prev;
 };
@@ -108,13 +109,12 @@
 	short		allot; /* credit for this slot */
 };
 
-struct sfq_sched_data
-{
+struct sfq_sched_data {
 /* Parameters */
 	int		perturb_period;
-	unsigned	quantum;	/* Allotment per round: MUST BE >= MTU */
+	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
 	int		limit;
-
+	unsigned int	divisor;	/* number of slots in hash table */
 /* Variables */
 	struct tcf_proto *filter_list;
 	struct timer_list perturb_timer;
@@ -122,7 +122,7 @@
 	sfq_index	cur_depth;	/* depth of longest slot */
 	unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
 	struct sfq_slot *tail;		/* current slot in round */
-	sfq_index	ht[SFQ_HASH_DIVISOR];	/* Hash table */
+	sfq_index	*ht;		/* Hash table (divisor slots) */
 	struct sfq_slot	slots[SFQ_SLOTS];
 	struct sfq_head	dep[SFQ_DEPTH];	/* Linked list of slots, indexed by depth */
 };
@@ -137,12 +137,12 @@
 	return &q->dep[val - SFQ_SLOTS];
 }
 
-static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
+static unsigned int sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
 {
-	return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
+	return jhash_2words(h, h1, q->perturbation) & (q->divisor - 1);
 }
 
-static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
+static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 {
 	u32 h, h2;
 
@@ -157,13 +157,13 @@
 		iph = ip_hdr(skb);
 		h = (__force u32)iph->daddr;
 		h2 = (__force u32)iph->saddr ^ iph->protocol;
-		if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+		if (iph->frag_off & htons(IP_MF | IP_OFFSET))
 			break;
 		poff = proto_ports_offset(iph->protocol);
 		if (poff >= 0 &&
 		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
 			iph = ip_hdr(skb);
-			h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff);
+			h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
 		}
 		break;
 	}
@@ -181,7 +181,7 @@
 		if (poff >= 0 &&
 		    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
 			iph = ipv6_hdr(skb);
-			h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff);
+			h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
 		}
 		break;
 	}
@@ -203,7 +203,7 @@
 
 	if (TC_H_MAJ(skb->priority) == sch->handle &&
 	    TC_H_MIN(skb->priority) > 0 &&
-	    TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
+	    TC_H_MIN(skb->priority) <= q->divisor)
 		return TC_H_MIN(skb->priority);
 
 	if (!q->filter_list)
@@ -221,7 +221,7 @@
 			return 0;
 		}
 #endif
-		if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
+		if (TC_H_MIN(res.classid) <= q->divisor)
 			return TC_H_MIN(res.classid);
 	}
 	return 0;
@@ -402,10 +402,8 @@
 		q->tail = slot;
 		slot->allot = q->scaled_quantum;
 	}
-	if (++sch->q.qlen <= q->limit) {
-		qdisc_bstats_update(sch, skb);
+	if (++sch->q.qlen <= q->limit)
 		return NET_XMIT_SUCCESS;
-	}
 
 	sfq_drop(sch);
 	return NET_XMIT_CN;
@@ -445,6 +443,7 @@
 	}
 	skb = slot_dequeue_head(slot);
 	sfq_dec(q, a);
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen--;
 	sch->qstats.backlog -= qdisc_pkt_len(skb);
 
@@ -492,13 +491,18 @@
 	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
 		return -EINVAL;
 
+	if (ctl->divisor &&
+	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
+		return -EINVAL;
+
 	sch_tree_lock(sch);
 	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
 	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
 	q->perturb_period = ctl->perturb_period * HZ;
 	if (ctl->limit)
 		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
-
+	if (ctl->divisor)
+		q->divisor = ctl->divisor;
 	qlen = sch->q.qlen;
 	while (sch->q.qlen > q->limit)
 		sfq_drop(sch);
@@ -516,15 +520,13 @@
 static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
+	size_t sz;
 	int i;
 
 	q->perturb_timer.function = sfq_perturbation;
 	q->perturb_timer.data = (unsigned long)sch;
 	init_timer_deferrable(&q->perturb_timer);
 
-	for (i = 0; i < SFQ_HASH_DIVISOR; i++)
-		q->ht[i] = SFQ_EMPTY_SLOT;
-
 	for (i = 0; i < SFQ_DEPTH; i++) {
 		q->dep[i].next = i + SFQ_SLOTS;
 		q->dep[i].prev = i + SFQ_SLOTS;
@@ -533,6 +535,7 @@
 	q->limit = SFQ_DEPTH - 1;
 	q->cur_depth = 0;
 	q->tail = NULL;
+	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
 	if (opt == NULL) {
 		q->quantum = psched_mtu(qdisc_dev(sch));
 		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
@@ -544,10 +547,23 @@
 			return err;
 	}
 
+	sz = sizeof(q->ht[0]) * q->divisor;
+	q->ht = kmalloc(sz, GFP_KERNEL);
+	if (!q->ht && sz > PAGE_SIZE)
+		q->ht = vmalloc(sz);
+	if (!q->ht)
+		return -ENOMEM;
+	for (i = 0; i < q->divisor; i++)
+		q->ht[i] = SFQ_EMPTY_SLOT;
+
 	for (i = 0; i < SFQ_SLOTS; i++) {
 		slot_queue_init(&q->slots[i]);
 		sfq_link(q, i);
 	}
+	if (q->limit >= 1)
+		sch->flags |= TCQ_F_CAN_BYPASS;
+	else
+		sch->flags &= ~TCQ_F_CAN_BYPASS;
 	return 0;
 }
 
@@ -558,6 +574,10 @@
 	tcf_destroy_chain(&q->filter_list);
 	q->perturb_period = 0;
 	del_timer_sync(&q->perturb_timer);
+	if (is_vmalloc_addr(q->ht))
+		vfree(q->ht);
+	else
+		kfree(q->ht);
 }
 
 static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -570,7 +590,7 @@
 	opt.perturb_period = q->perturb_period / HZ;
 
 	opt.limit = q->limit;
-	opt.divisor = SFQ_HASH_DIVISOR;
+	opt.divisor = q->divisor;
 	opt.flows = q->limit;
 
 	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
@@ -595,6 +615,8 @@
 static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
 			      u32 classid)
 {
+	/* we cannot bypass queue discipline anymore */
+	sch->flags &= ~TCQ_F_CAN_BYPASS;
 	return 0;
 }
 
@@ -648,7 +670,7 @@
 	if (arg->stop)
 		return;
 
-	for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
+	for (i = 0; i < q->divisor; i++) {
 		if (q->ht[i] == SFQ_EMPTY_SLOT ||
 		    arg->count < arg->skip) {
 			arg->count++;
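
The sch_sfq.c changes above replace the fixed SFQ_HASH_DIVISOR with a per-qdisc divisor, allocate the hash table with kmalloc(), falling back to vmalloc() when a large kmalloc() fails, and keep folding the flow hash with "& (divisor - 1)"; that mask only behaves like a modulo when the divisor is a power of two, which is why sfq_change() now checks is_power_of_2(ctl->divisor). A userspace sketch of the fold is shown below; mix() is a toy stand-in for jhash_2words() and the whole program is illustrative, not the kernel code.

/* Illustrative userspace sketch; mix() is a toy hash, not jhash_2words(). */
#include <stdio.h>
#include <stdint.h>

static uint32_t mix(uint32_t a, uint32_t b, uint32_t seed)
{
	uint32_t h = a ^ (b + 0x9e3779b9u) ^ seed;

	h ^= h >> 16;
	h *= 0x7feb352du;
	h ^= h >> 15;
	return h;
}

static unsigned int fold_hash(uint32_t h, uint32_t h1, uint32_t perturbation,
			      unsigned int divisor)
{
	/* valid only when divisor is a power of two, hence the
	 * is_power_of_2() check added to sfq_change() above */
	return mix(h, h1, perturbation) & (divisor - 1);
}

int main(void)
{
	unsigned int slot = fold_hash(0x0a000001, 0x0a000002, 42, 1024);

	printf("slot %u of 1024\n", slot);	/* always < divisor */
	return 0;
}
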
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 77565e7..1dcfb52 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -97,8 +97,7 @@
 	changed the limit is not effective anymore.
 */
 
-struct tbf_sched_data
-{
+struct tbf_sched_data {
 /* Parameters */
 	u32		limit;		/* Maximal length of backlog: bytes */
 	u32		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
@@ -115,10 +114,10 @@
 	struct qdisc_watchdog watchdog;	/* Watchdog timer */
 };
 
-#define L2T(q,L)   qdisc_l2t((q)->R_tab,L)
-#define L2T_P(q,L) qdisc_l2t((q)->P_tab,L)
+#define L2T(q, L)   qdisc_l2t((q)->R_tab, L)
+#define L2T_P(q, L) qdisc_l2t((q)->P_tab, L)
 
-static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	int ret;
@@ -134,11 +133,10 @@
 	}
 
 	sch->q.qlen++;
-	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
 
-static unsigned int tbf_drop(struct Qdisc* sch)
+static unsigned int tbf_drop(struct Qdisc *sch)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	unsigned int len = 0;
@@ -150,7 +148,7 @@
 	return len;
 }
 
-static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
+static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
@@ -186,7 +184,8 @@
 			q->tokens = toks;
 			q->ptokens = ptoks;
 			sch->q.qlen--;
-			sch->flags &= ~TCQ_F_THROTTLED;
+			qdisc_unthrottled(sch);
+			qdisc_bstats_update(sch, skb);
 			return skb;
 		}
 
@@ -209,7 +208,7 @@
 	return NULL;
 }
 
-static void tbf_reset(struct Qdisc* sch)
+static void tbf_reset(struct Qdisc *sch)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 
@@ -227,7 +226,7 @@
 	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
 };
 
-static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
+static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 {
 	int err;
 	struct tbf_sched_data *q = qdisc_priv(sch);
@@ -236,7 +235,7 @@
 	struct qdisc_rate_table *rtab = NULL;
 	struct qdisc_rate_table *ptab = NULL;
 	struct Qdisc *child = NULL;
-	int max_size,n;
+	int max_size, n;
 
 	err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
 	if (err < 0)
@@ -259,15 +258,18 @@
 	}
 
 	for (n = 0; n < 256; n++)
-		if (rtab->data[n] > qopt->buffer) break;
-	max_size = (n << qopt->rate.cell_log)-1;
+		if (rtab->data[n] > qopt->buffer)
+			break;
+	max_size = (n << qopt->rate.cell_log) - 1;
 	if (ptab) {
 		int size;
 
 		for (n = 0; n < 256; n++)
-			if (ptab->data[n] > qopt->mtu) break;
-		size = (n << qopt->peakrate.cell_log)-1;
-		if (size < max_size) max_size = size;
+			if (ptab->data[n] > qopt->mtu)
+				break;
+		size = (n << qopt->peakrate.cell_log) - 1;
+		if (size < max_size)
+			max_size = size;
 	}
 	if (max_size < 0)
 		goto done;
@@ -310,7 +312,7 @@
 	return err;
 }
 
-static int tbf_init(struct Qdisc* sch, struct nlattr *opt)
+static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
 
@@ -422,8 +424,7 @@
 	}
 }
 
-static const struct Qdisc_class_ops tbf_class_ops =
-{
+static const struct Qdisc_class_ops tbf_class_ops = {
 	.graft		=	tbf_graft,
 	.leaf		=	tbf_leaf,
 	.get		=	tbf_get,
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 84ce48e..45cd300 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -53,8 +53,7 @@
       which will not break load balancing, though native slave
       traffic will have the highest priority.  */
 
-struct teql_master
-{
+struct teql_master {
 	struct Qdisc_ops qops;
 	struct net_device *dev;
 	struct Qdisc *slaves;
@@ -65,29 +64,27 @@
 	unsigned long	tx_dropped;
 };
 
-struct teql_sched_data
-{
+struct teql_sched_data {
 	struct Qdisc *next;
 	struct teql_master *m;
 	struct neighbour *ncache;
 	struct sk_buff_head q;
 };
 
-#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next)
+#define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next)
 
-#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT)
+#define FMASK (IFF_BROADCAST | IFF_POINTOPOINT)
 
 /* "teql*" qdisc routines */
 
 static int
-teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct net_device *dev = qdisc_dev(sch);
 	struct teql_sched_data *q = qdisc_priv(sch);
 
 	if (q->q.qlen < dev->tx_queue_len) {
 		__skb_queue_tail(&q->q, skb);
-		qdisc_bstats_update(sch, skb);
 		return NET_XMIT_SUCCESS;
 	}
 
@@ -97,7 +94,7 @@
 }
 
 static struct sk_buff *
-teql_dequeue(struct Qdisc* sch)
+teql_dequeue(struct Qdisc *sch)
 {
 	struct teql_sched_data *dat = qdisc_priv(sch);
 	struct netdev_queue *dat_queue;
@@ -111,19 +108,21 @@
 			dat->m->slaves = sch;
 			netif_wake_queue(m);
 		}
+	} else {
+		qdisc_bstats_update(sch, skb);
 	}
 	sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
 	return skb;
 }
 
 static struct sk_buff *
-teql_peek(struct Qdisc* sch)
+teql_peek(struct Qdisc *sch)
 {
 	/* teql is meant to be used as root qdisc */
 	return NULL;
 }
 
-static __inline__ void
+static inline void
 teql_neigh_release(struct neighbour *n)
 {
 	if (n)
@@ -131,7 +130,7 @@
 }
 
 static void
-teql_reset(struct Qdisc* sch)
+teql_reset(struct Qdisc *sch)
 {
 	struct teql_sched_data *dat = qdisc_priv(sch);
 
@@ -141,13 +140,14 @@
 }
 
 static void
-teql_destroy(struct Qdisc* sch)
+teql_destroy(struct Qdisc *sch)
 {
 	struct Qdisc *q, *prev;
 	struct teql_sched_data *dat = qdisc_priv(sch);
 	struct teql_master *master = dat->m;
 
-	if ((prev = master->slaves) != NULL) {
+	prev = master->slaves;
+	if (prev) {
 		do {
 			q = NEXT_SLAVE(prev);
 			if (q == sch) {
@@ -179,7 +179,7 @@
 static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct net_device *dev = qdisc_dev(sch);
-	struct teql_master *m = (struct teql_master*)sch->ops;
+	struct teql_master *m = (struct teql_master *)sch->ops;
 	struct teql_sched_data *q = qdisc_priv(sch);
 
 	if (dev->hard_header_len > m->dev->hard_header_len)
@@ -290,7 +290,8 @@
 	nores = 0;
 	busy = 0;
 
-	if ((q = start) == NULL)
+	q = start;
+	if (!q)
 		goto drop;
 
 	do {
@@ -355,10 +356,10 @@
 
 static int teql_master_open(struct net_device *dev)
 {
-	struct Qdisc * q;
+	struct Qdisc *q;
 	struct teql_master *m = netdev_priv(dev);
 	int mtu = 0xFFFE;
-	unsigned flags = IFF_NOARP|IFF_MULTICAST;
+	unsigned int flags = IFF_NOARP | IFF_MULTICAST;
 
 	if (m->slaves == NULL)
 		return -EUNATCH;
@@ -426,7 +427,7 @@
 		do {
 			if (new_mtu > qdisc_dev(q)->mtu)
 				return -EINVAL;
-		} while ((q=NEXT_SLAVE(q)) != m->slaves);
+		} while ((q = NEXT_SLAVE(q)) != m->slaves);
 	}
 
 	dev->mtu = new_mtu;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 2cc46f0..b23428f 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2029,11 +2029,11 @@
 			*errp = sctp_make_op_error_fixed(asoc, chunk);
 
 		if (*errp) {
-			sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
-					WORD_ROUND(ntohs(param.p->length)));
-			sctp_addto_chunk_fixed(*errp,
-					WORD_ROUND(ntohs(param.p->length)),
-					param.v);
+			if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+					WORD_ROUND(ntohs(param.p->length))))
+				sctp_addto_chunk_fixed(*errp,
+						WORD_ROUND(ntohs(param.p->length)),
+						param.v);
 		} else {
 			/* If there is no memory for generating the ERROR
 			 * report as specified, an ABORT will be triggered
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index a09b0dd..b53b2eb 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3428,7 +3428,7 @@
 		retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
 		break;
 
-	case SCTP_DELAYED_ACK:
+	case SCTP_DELAYED_SACK:
 		retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
 		break;
 	case SCTP_PARTIAL_DELIVERY_POINT:
@@ -5333,7 +5333,7 @@
 		retval = sctp_getsockopt_peer_addr_params(sk, len, optval,
 							  optlen);
 		break;
-	case SCTP_DELAYED_ACK:
+	case SCTP_DELAYED_SACK:
 		retval = sctp_getsockopt_delayed_ack(sk, len, optval,
 							  optlen);
 		break;
@@ -6102,15 +6102,16 @@
 			wake_up_interruptible(&asoc->wait);
 
 		if (sctp_writeable(sk)) {
-			if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-				wake_up_interruptible(sk_sleep(sk));
+			wait_queue_head_t *wq = sk_sleep(sk);
+
+			if (wq && waitqueue_active(wq))
+				wake_up_interruptible(wq);
 
 			/* Note that we try to include the Async I/O support
 			 * here by modeling from the current TCP/UDP code.
 			 * We have not tested with it yet.
 			 */
-			if (sock->wq->fasync_list &&
-			    !(sk->sk_shutdown & SEND_SHUTDOWN))
+			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
 				sock_wake_async(sock,
 						SOCK_WAKE_SPACE, POLL_OUT);
 		}
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index 747d541..f1e40ceb 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -344,7 +344,7 @@
 
 	/* Refresh the gap ack information. */
 	if (sctp_tsnmap_has_gap(map)) {
-		__u16 start, end;
+		__u16 start = 0, end = 0;
 		sctp_tsnmap_iter_init(map, &iter);
 		while (sctp_tsnmap_next_gap_ack(map, &iter,
 						&start,
diff --git a/net/socket.c b/net/socket.c
index ac2219f..9fa1e3b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -240,17 +240,19 @@
 static struct inode *sock_alloc_inode(struct super_block *sb)
 {
 	struct socket_alloc *ei;
+	struct socket_wq *wq;
 
 	ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
-	ei->socket.wq = kmalloc(sizeof(struct socket_wq), GFP_KERNEL);
-	if (!ei->socket.wq) {
+	wq = kmalloc(sizeof(*wq), GFP_KERNEL);
+	if (!wq) {
 		kmem_cache_free(sock_inode_cachep, ei);
 		return NULL;
 	}
-	init_waitqueue_head(&ei->socket.wq->wait);
-	ei->socket.wq->fasync_list = NULL;
+	init_waitqueue_head(&wq->wait);
+	wq->fasync_list = NULL;
+	RCU_INIT_POINTER(ei->socket.wq, wq);
 
 	ei->socket.state = SS_UNCONNECTED;
 	ei->socket.flags = 0;
@@ -273,9 +275,11 @@
 static void sock_destroy_inode(struct inode *inode)
 {
 	struct socket_alloc *ei;
+	struct socket_wq *wq;
 
 	ei = container_of(inode, struct socket_alloc, vfs_inode);
-	call_rcu(&ei->socket.wq->rcu, wq_free_rcu);
+	wq = rcu_dereference_protected(ei->socket.wq, 1);
+	call_rcu(&wq->rcu, wq_free_rcu);
 	kmem_cache_free(sock_inode_cachep, ei);
 }
 
@@ -524,7 +528,7 @@
 		module_put(owner);
 	}
 
-	if (sock->wq->fasync_list)
+	if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
 		printk(KERN_ERR "sock_release: fasync list not empty!\n");
 
 	percpu_sub(sockets_in_use, 1);
@@ -1108,15 +1112,16 @@
 {
 	struct socket *sock = filp->private_data;
 	struct sock *sk = sock->sk;
+	struct socket_wq *wq;
 
 	if (sk == NULL)
 		return -EINVAL;
 
 	lock_sock(sk);
+	wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk));
+	fasync_helper(fd, filp, on, &wq->fasync_list);
 
-	fasync_helper(fd, filp, on, &sock->wq->fasync_list);
-
-	if (!sock->wq->fasync_list)
+	if (!wq->fasync_list)
 		sock_reset_flag(sk, SOCK_FASYNC);
 	else
 		sock_set_flag(sk, SOCK_FASYNC);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 7bd3bbb..b7d435c 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -420,6 +420,7 @@
 static void svc_udp_data_ready(struct sock *sk, int count)
 {
 	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;
+	wait_queue_head_t *wq = sk_sleep(sk);
 
 	if (svsk) {
 		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
@@ -428,8 +429,8 @@
 		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
-	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible(sk_sleep(sk));
+	if (wq && waitqueue_active(wq))
+		wake_up_interruptible(wq);
 }
 
 /*
@@ -438,6 +439,7 @@
 static void svc_write_space(struct sock *sk)
 {
 	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);
+	wait_queue_head_t *wq = sk_sleep(sk);
 
 	if (svsk) {
 		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
@@ -445,10 +447,10 @@
 		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
 
-	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) {
+	if (wq && waitqueue_active(wq)) {
 		dprintk("RPC svc_write_space: someone sleeping on %p\n",
 		       svsk);
-		wake_up_interruptible(sk_sleep(sk));
+		wake_up_interruptible(wq);
 	}
 }
 
@@ -739,6 +741,7 @@
 static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
 {
 	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;
+	wait_queue_head_t *wq;
 
 	dprintk("svc: socket %p TCP (listen) state change %d\n",
 		sk, sk->sk_state);
@@ -761,8 +764,9 @@
 			printk("svc: socket %p: no user data\n", sk);
 	}
 
-	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible_all(sk_sleep(sk));
+	wq = sk_sleep(sk);
+	if (wq && waitqueue_active(wq))
+		wake_up_interruptible_all(wq);
 }
 
 /*
@@ -771,6 +775,7 @@
 static void svc_tcp_state_change(struct sock *sk)
 {
 	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;
+	wait_queue_head_t *wq = sk_sleep(sk);
 
 	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
 		sk, sk->sk_state, sk->sk_user_data);
@@ -781,13 +786,14 @@
 		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
-	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible_all(sk_sleep(sk));
+	if (wq && waitqueue_active(wq))
+		wake_up_interruptible_all(wq);
 }
 
 static void svc_tcp_data_ready(struct sock *sk, int count)
 {
 	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
+	wait_queue_head_t *wq = sk_sleep(sk);
 
 	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
 		sk, sk->sk_user_data);
@@ -795,8 +801,8 @@
 		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		svc_xprt_enqueue(&svsk->sk_xprt);
 	}
-	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible(sk_sleep(sk));
+	if (wq && waitqueue_active(wq))
+		wake_up_interruptible(wq);
 }
 
 /*
@@ -1531,6 +1537,7 @@
 {
 	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
 	struct sock *sk = svsk->sk_sk;
+	wait_queue_head_t *wq;
 
 	dprintk("svc: svc_sock_detach(%p)\n", svsk);
 
@@ -1539,8 +1546,9 @@
 	sk->sk_data_ready = svsk->sk_odata;
 	sk->sk_write_space = svsk->sk_owspace;
 
-	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
-		wake_up_interruptible(sk_sleep(sk));
+	wq = sk_sleep(sk);
+	if (wq && waitqueue_active(wq))
+		wake_up_interruptible(wq);
 }
 
 /*
@@ -1609,9 +1617,7 @@
  */
 static void svc_bc_sock_free(struct svc_xprt *xprt)
 {
-	if (xprt) {
-		kfree(xprt->xpt_bc_sid);
+	if (xprt)
 		kfree(container_of(xprt, struct svc_sock, sk_xprt));
-	}
 }
 #endif /* CONFIG_NFS_V4_1 */
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index dd419d2..217fb7f 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1171,7 +1171,7 @@
 	newsk->sk_type		= sk->sk_type;
 	init_peercred(newsk);
 	newu = unix_sk(newsk);
-	newsk->sk_wq		= &newu->peer_wq;
+	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
 	otheru = unix_sk(other);
 
 	/* copy address information from listening to new sock*/
@@ -1475,6 +1475,12 @@
 			goto out_free;
 	}
 
+	if (sk_filter(other, skb) < 0) {
+		/* Toss the packet but do not return any error to the sender */
+		err = len;
+		goto out_free;
+	}
+
 	unix_state_lock(other);
 	err = -EPERM;
 	if (!unix_may_send(sk, other))
@@ -1978,36 +1984,38 @@
 
 	mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
 
-	if (mode) {
-		unix_state_lock(sk);
-		sk->sk_shutdown |= mode;
-		other = unix_peer(sk);
-		if (other)
-			sock_hold(other);
-		unix_state_unlock(sk);
-		sk->sk_state_change(sk);
+	if (!mode)
+		return 0;
 
-		if (other &&
-			(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
+	unix_state_lock(sk);
+	sk->sk_shutdown |= mode;
+	other = unix_peer(sk);
+	if (other)
+		sock_hold(other);
+	unix_state_unlock(sk);
+	sk->sk_state_change(sk);
 
-			int peer_mode = 0;
+	if (other &&
+		(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
 
-			if (mode&RCV_SHUTDOWN)
-				peer_mode |= SEND_SHUTDOWN;
-			if (mode&SEND_SHUTDOWN)
-				peer_mode |= RCV_SHUTDOWN;
-			unix_state_lock(other);
-			other->sk_shutdown |= peer_mode;
-			unix_state_unlock(other);
-			other->sk_state_change(other);
-			if (peer_mode == SHUTDOWN_MASK)
-				sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
-			else if (peer_mode & RCV_SHUTDOWN)
-				sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
-		}
-		if (other)
-			sock_put(other);
+		int peer_mode = 0;
+
+		if (mode&RCV_SHUTDOWN)
+			peer_mode |= SEND_SHUTDOWN;
+		if (mode&SEND_SHUTDOWN)
+			peer_mode |= RCV_SHUTDOWN;
+		unix_state_lock(other);
+		other->sk_shutdown |= peer_mode;
+		unix_state_unlock(other);
+		other->sk_state_change(other);
+		if (peer_mode == SHUTDOWN_MASK)
+			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
+		else if (peer_mode & RCV_SHUTDOWN)
+			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
 	}
+	if (other)
+		sock_put(other);
+
 	return 0;
 }
 
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 74944a2..788a12c 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -59,8 +59,6 @@
 #include <asm/uaccess.h>        /* copy_to/from_user */
 #include <linux/init.h>         /* __initfunc et al. */
 
-#define KMEM_SAFETYZONE 8
-
 #define DEV_TO_SLAVE(dev)	(*((struct net_device **)netdev_priv(dev)))
 
 /*
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index d0ee290..1f1ef70 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -95,7 +95,7 @@
 	  If unsure, say N.
 
 config CFG80211_INTERNAL_REGDB
-	bool "use statically compiled regulatory rules database" if EMBEDDED
+	bool "use statically compiled regulatory rules database" if EXPERT
 	default n
 	depends on CFG80211
 	---help---
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 55187c8..4062075 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -27,9 +27,19 @@
 #include <net/sock.h>
 #include <net/x25.h>
 
-/*
- * Parse a set of facilities into the facilities structures. Unrecognised
- *	facilities are written to the debug log file.
+/**
+ * x25_parse_facilities - Parse facilities from skb into the facilities structs
+ *
+ * @skb: sk_buff to parse
+ * @facilities: Regular facilities, updated as facilities are found
+ * @dte_facs: ITU DTE facilities, updated as DTE facilities are found
+ * @vc_fac_mask: mask is updated with all facilities found
+ *
+ * Return codes:
+ *  -1 - Parsing error, caller should drop call and clean up
+ *   0 - Parse OK, this skb has no facilities
+ *  >0 - Parse OK, returns the length of the facilities header
+ *
  */
 int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
 		struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask)
@@ -62,7 +72,7 @@
 		switch (*p & X25_FAC_CLASS_MASK) {
 		case X25_FAC_CLASS_A:
 			if (len < 2)
-				return 0;
+				return -1;
 			switch (*p) {
 			case X25_FAC_REVERSE:
 				if((p[1] & 0x81) == 0x81) {
@@ -107,7 +117,7 @@
 			break;
 		case X25_FAC_CLASS_B:
 			if (len < 3)
-				return 0;
+				return -1;
 			switch (*p) {
 			case X25_FAC_PACKET_SIZE:
 				facilities->pacsize_in  = p[1];
@@ -130,7 +140,7 @@
 			break;
 		case X25_FAC_CLASS_C:
 			if (len < 4)
-				return 0;
+				return -1;
 			printk(KERN_DEBUG "X.25: unknown facility %02X, "
 			       "values %02X, %02X, %02X\n",
 			       p[0], p[1], p[2], p[3]);
@@ -139,18 +149,18 @@
 			break;
 		case X25_FAC_CLASS_D:
 			if (len < p[1] + 2)
-				return 0;
+				return -1;
 			switch (*p) {
 			case X25_FAC_CALLING_AE:
 				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
-					return 0;
+					return -1;
 				dte_facs->calling_len = p[2];
 				memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
 				*vc_fac_mask |= X25_MASK_CALLING_AE;
 				break;
 			case X25_FAC_CALLED_AE:
 				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
-					return 0;
+					return -1;
 				dte_facs->called_len = p[2];
 				memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
 				*vc_fac_mask |= X25_MASK_CALLED_AE;
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index f729f02..15de65f 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -91,10 +91,10 @@
 {
 	struct x25_address source_addr, dest_addr;
 	int len;
+	struct x25_sock *x25 = x25_sk(sk);
 
 	switch (frametype) {
 		case X25_CALL_ACCEPTED: {
-			struct x25_sock *x25 = x25_sk(sk);
 
 			x25_stop_timer(sk);
 			x25->condition = 0x00;
@@ -113,14 +113,16 @@
 						&dest_addr);
 			if (len > 0)
 				skb_pull(skb, len);
+			else if (len < 0)
+				goto out_clear;
 
 			len = x25_parse_facilities(skb, &x25->facilities,
 						&x25->dte_facilities,
 						&x25->vc_facil_mask);
 			if (len > 0)
 				skb_pull(skb, len);
-			else
-				return -1;
+			else if (len < 0)
+				goto out_clear;
 			/*
 			 *	Copy any Call User Data.
 			 */
@@ -144,6 +146,12 @@
 	}
 
 	return 0;
+
+out_clear:
+	x25_write_internal(sk, X25_CLEAR_REQUEST);
+	x25->state = X25_STATE_2;
+	x25_start_t23timer(sk);
+	return 0;
 }
 
 /*
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 4cbc942..2130692 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -396,9 +396,12 @@
 	write_lock_bh(&x25_neigh_list_lock);
 
 	list_for_each_safe(entry, tmp, &x25_neigh_list) {
+		struct net_device *dev;
+
 		nb = list_entry(entry, struct x25_neigh, node);
+		dev = nb->dev;
 		__x25_remove_neigh(nb);
-		dev_put(nb->dev);
+		dev_put(dev);
 	}
 	write_unlock_bh(&x25_neigh_list_lock);
 }
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8b3ef40..7a8e2c7 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1340,10 +1340,13 @@
 	default:
 		BUG();
 	}
-	xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
+	xdst = dst_alloc(dst_ops, 0);
 	xfrm_policy_put_afinfo(afinfo);
 
-	xdst->flo.ops = &xfrm_bundle_fc_ops;
+	if (likely(xdst))
+		xdst->flo.ops = &xfrm_bundle_fc_ops;
+	else
+		xdst = ERR_PTR(-ENOBUFS);
 
 	return xdst;
 }
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d5e1e0b..6129196 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2189,7 +2189,7 @@
 
 	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
 	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
-	    (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
 		if (link->dump == NULL)
 			return -EINVAL;
 
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index b0b2357..f6cbc3d 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -238,12 +238,12 @@
 fi
 
 # Build header package
-find . -name Makefile -o -name Kconfig\* -o -name \*.pl > /tmp/files$$
-find arch/x86/include include scripts -type f >> /tmp/files$$
+(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > /tmp/files$$)
+(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> /tmp/files$$)
 (cd $objtree; find .config Module.symvers include scripts -type f >> /tmp/objfiles$$)
 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
 mkdir -p "$destdir"
-tar -c -f - -T /tmp/files$$ | (cd $destdir; tar -xf -)
+(cd $srctree; tar -c -f - -T /tmp/files$$) | (cd $destdir; tar -xf -)
 (cd $objtree; tar -c -f - -T /tmp/objfiles$$) | (cd $destdir; tar -xf -)
 rm -f /tmp/files$$ /tmp/objfiles$$
 arch=$(dpkg --print-architecture)
diff --git a/security/keys/Makefile b/security/keys/Makefile
index 6c94105..1bf090a 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -13,8 +13,8 @@
 	request_key_auth.o \
 	user_defined.o
 
-obj-$(CONFIG_TRUSTED_KEYS) += trusted_defined.o
-obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted_defined.o
+obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted.o
 obj-$(CONFIG_KEYS_COMPAT) += compat.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/security/keys/compat.c b/security/keys/compat.c
index 792c0a6..07a5f35 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -1,4 +1,4 @@
-/* compat.c: 32-bit compatibility syscall for 64-bit systems
+/* 32-bit compatibility syscall for 64-bit systems
  *
  * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -14,13 +14,13 @@
 #include <linux/compat.h>
 #include "internal.h"
 
-/*****************************************************************************/
 /*
- * the key control system call, 32-bit compatibility version for 64-bit archs
- * - this should only be called if the 64-bit arch uses weird pointers in
- *   32-bit mode or doesn't guarantee that the top 32-bits of the argument
- *   registers on taking a 32-bit syscall are zero
- * - if you can, you should call sys_keyctl directly
+ * The key control system call, 32-bit compatibility version for 64-bit archs
+ *
+ * This should only be called if the 64-bit arch uses weird pointers in 32-bit
+ * mode or doesn't guarantee that the top 32-bits of the argument registers on
+ * taking a 32-bit syscall are zero.  If you can, you should call sys_keyctl()
+ * directly.
  */
 asmlinkage long compat_sys_keyctl(u32 option,
 				  u32 arg2, u32 arg3, u32 arg4, u32 arg5)
@@ -88,5 +88,4 @@
 	default:
 		return -EOPNOTSUPP;
 	}
-
-} /* end compat_sys_keyctl() */
+}
diff --git a/security/keys/encrypted_defined.c b/security/keys/encrypted.c
similarity index 99%
rename from security/keys/encrypted_defined.c
rename to security/keys/encrypted.c
index 32d27c8..9e7e4ce 100644
--- a/security/keys/encrypted_defined.c
+++ b/security/keys/encrypted.c
@@ -30,7 +30,7 @@
 #include <crypto/sha.h>
 #include <crypto/aes.h>
 
-#include "encrypted_defined.h"
+#include "encrypted.h"
 
 static const char KEY_TRUSTED_PREFIX[] = "trusted:";
 static const char KEY_USER_PREFIX[] = "user:";
diff --git a/security/keys/encrypted_defined.h b/security/keys/encrypted.h
similarity index 100%
rename from security/keys/encrypted_defined.h
rename to security/keys/encrypted.h
diff --git a/security/keys/gc.c b/security/keys/gc.c
index a46e825..89df6b5 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -32,8 +32,8 @@
 static time_t key_gc_new_timer;
 
 /*
- * Schedule a garbage collection run
- * - precision isn't particularly important
+ * Schedule a garbage collection run.
+ * - time precision isn't particularly important
  */
 void key_schedule_gc(time_t gc_at)
 {
@@ -61,8 +61,9 @@
 }
 
 /*
- * Garbage collect pointers from a keyring
- * - return true if we altered the keyring
+ * Garbage collect pointers from a keyring.
+ *
+ * Return true if we altered the keyring.
  */
 static bool key_gc_keyring(struct key *keyring, time_t limit)
 	__releases(key_serial_lock)
@@ -107,9 +108,8 @@
 }
 
 /*
- * Garbage collector for keys
- * - this involves scanning the keyrings for dead, expired and revoked keys
- *   that have overstayed their welcome
+ * Garbage collector for keys.  This involves scanning the keyrings for dead,
+ * expired and revoked keys that have overstayed their welcome.
  */
 static void key_garbage_collector(struct work_struct *work)
 {
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 56a133d..a52aa7c 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -1,4 +1,4 @@
-/* internal.h: authentication token and access key management internal defs
+/* Authentication token and access key management internal defs
  *
  * Copyright (C) 2003-5, 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -35,10 +35,12 @@
 
 /*****************************************************************************/
 /*
- * keep track of keys for a user
- * - this needs to be separate to user_struct to avoid a refcount-loop
- *   (user_struct pins some keyrings which pin this struct)
- * - this also keeps track of keys under request from userspace for this UID
+ * Keep track of keys for a user.
+ *
+ * This needs to be separate to user_struct to avoid a refcount-loop
+ * (user_struct pins some keyrings which pin this struct).
+ *
+ * We also keep track of keys under request from userspace for this UID here.
  */
 struct key_user {
 	struct rb_node		node;
@@ -62,7 +64,7 @@
 extern void key_user_put(struct key_user *user);
 
 /*
- * key quota limits
+ * Key quota limits.
  * - root has its own separate limits to everyone else
  */
 extern unsigned key_quota_root_maxkeys;
@@ -85,13 +87,13 @@
 extern int __key_link_begin(struct key *keyring,
 			    const struct key_type *type,
 			    const char *description,
-			    struct keyring_list **_prealloc);
+			    unsigned long *_prealloc);
 extern int __key_link_check_live_key(struct key *keyring, struct key *key);
 extern void __key_link(struct key *keyring, struct key *key,
-		       struct keyring_list **_prealloc);
+		       unsigned long *_prealloc);
 extern void __key_link_end(struct key *keyring,
 			   struct key_type *type,
-			   struct keyring_list *prealloc);
+			   unsigned long prealloc);
 
 extern key_ref_t __keyring_search_one(key_ref_t keyring_ref,
 				      const struct key_type *type,
@@ -146,13 +148,13 @@
 extern void keyring_gc(struct key *keyring, time_t limit);
 extern void key_schedule_gc(time_t expiry_at);
 
-/*
- * check to see whether permission is granted to use a key in the desired way
- */
 extern int key_task_permission(const key_ref_t key_ref,
 			       const struct cred *cred,
 			       key_perm_t perm);
 
+/*
+ * Check to see whether permission is granted to use a key in the desired way.
+ */
 static inline int key_permission(const key_ref_t key_ref, key_perm_t perm)
 {
 	return key_task_permission(key_ref, current_cred(), perm);
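
To illustrate the helper documented above: key_permission() is simply key_task_permission() applied to the current task's credentials. A hedged kernel-side sketch of how a caller might gate payload access on it; example_peek() is hypothetical, and KEY_READ is assumed to be one of the permission bits summarised by KEY_ALL below.

/* Hedged sketch: gate payload access on Read permission for the
 * current credentials.  example_peek() is hypothetical. */
static int example_peek(key_ref_t key_ref)
{
        int ret;

        ret = key_permission(key_ref, KEY_READ);
        if (ret < 0)
                return ret;             /* typically -EACCES */

        /* safe to look at key_ref_to_ptr(key_ref)->payload here */
        return 0;
}
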
@@ -168,7 +170,7 @@
 #define	KEY_ALL		0x3f	/* all the above permissions */
 
 /*
- * request_key authorisation
+ * Authorisation record for request_key().
  */
 struct request_key_auth {
 	struct key		*target_key;
@@ -188,7 +190,7 @@
 extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
 
 /*
- * keyctl functions
+ * keyctl() functions
  */
 extern long keyctl_get_keyring_ID(key_serial_t, int);
 extern long keyctl_join_session_keyring(const char __user *);
@@ -214,7 +216,7 @@
 extern long keyctl_session_to_parent(void);
 
 /*
- * debugging key validation
+ * Debugging key validation
  */
 #ifdef KEY_DEBUGGING
 extern void __key_check(const struct key *);
diff --git a/security/keys/key.c b/security/keys/key.c
index c1eac80..1c2d43d 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -39,10 +39,10 @@
 static void key_cleanup(struct work_struct *work);
 static DECLARE_WORK(key_cleanup_task, key_cleanup);
 
-/* we serialise key instantiation and link */
+/* We serialise key instantiation and link */
 DEFINE_MUTEX(key_construction_mutex);
 
-/* any key who's type gets unegistered will be re-typed to this */
+/* Any key whose type gets unregistered will be re-typed to this */
 static struct key_type key_type_dead = {
 	.name		= "dead",
 };
@@ -56,10 +56,9 @@
 }
 #endif
 
-/*****************************************************************************/
 /*
- * get the key quota record for a user, allocating a new record if one doesn't
- * already exist
+ * Get the key quota record for a user, allocating a new record if one doesn't
+ * already exist.
  */
 struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
 {
@@ -67,7 +66,7 @@
 	struct rb_node *parent = NULL;
 	struct rb_node **p;
 
- try_again:
+try_again:
 	p = &key_user_tree.rb_node;
 	spin_lock(&key_user_lock);
 
@@ -124,18 +123,16 @@
 	goto out;
 
 	/* okay - we found a user record for this UID */
- found:
+found:
 	atomic_inc(&user->usage);
 	spin_unlock(&key_user_lock);
 	kfree(candidate);
- out:
+out:
 	return user;
+}
 
-} /* end key_user_lookup() */
-
-/*****************************************************************************/
 /*
- * dispose of a user structure
+ * Dispose of a user structure
  */
 void key_user_put(struct key_user *user)
 {
@@ -146,14 +143,11 @@
 
 		kfree(user);
 	}
+}
 
-} /* end key_user_put() */
-
-/*****************************************************************************/
 /*
- * assign a key the next unique serial number
- * - these are assigned randomly to avoid security issues through covert
- *   channel problems
+ * Allocate a serial number for a key.  These are assigned randomly to avoid
+ * security issues through covert channel problems.
  */
 static inline void key_alloc_serial(struct key *key)
 {
@@ -211,18 +205,36 @@
 		if (key->serial < xkey->serial)
 			goto attempt_insertion;
 	}
+}
 
-} /* end key_alloc_serial() */
-
-/*****************************************************************************/
-/*
- * allocate a key of the specified type
- * - update the user's quota to reflect the existence of the key
- * - called from a key-type operation with key_types_sem read-locked by
- *   key_create_or_update()
- *   - this prevents unregistration of the key type
- * - upon return the key is as yet uninstantiated; the caller needs to either
- *   instantiate the key or discard it before returning
+/**
+ * key_alloc - Allocate a key of the specified type.
+ * @type: The type of key to allocate.
+ * @desc: The key description to allow the key to be searched out.
+ * @uid: The owner of the new key.
+ * @gid: The group ID for the new key's group permissions.
+ * @cred: The credentials specifying UID namespace.
+ * @perm: The permissions mask of the new key.
+ * @flags: Flags specifying quota properties.
+ *
+ * Allocate a key of the specified type with the attributes given.  The key is
+ * returned in an uninstantiated state and the caller needs to instantiate the
+ * key before returning.
+ *
+ * The user's key count quota is updated to reflect the creation of the key and
+ * the user's key data quota has the default for the key type reserved.  The
+ * instantiation function should amend this as necessary.  If insufficient
+ * quota is available, -EDQUOT will be returned.
+ *
+ * The LSM security modules can prevent a key being created, in which case
+ * -EACCES will be returned.
+ *
+ * Returns a pointer to the new key if successful and an error code otherwise.
+ *
+ * Note that the caller needs to ensure the key type isn't unregistered.
+ * Internally this can be done by locking key_types_sem.  Externally, this can
+ * be done by either never unregistering the key type, or making sure
+ * key_alloc() calls don't race with module unloading.
  */
 struct key *key_alloc(struct key_type *type, const char *desc,
 		      uid_t uid, gid_t gid, const struct cred *cred,
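
As a usage illustration of the contract documented above (allocate, then instantiate or discard), here is a hedged kernel-side sketch. example_make_key(), the description string and the permission mask are illustrative choices; KEY_POS_ALL, KEY_USR_* and KEY_ALLOC_IN_QUOTA are assumed from <linux/key.h>, and the uid_t/gid_t credential fields are the 2.6.38-era ones.

#include <linux/key.h>
#include <linux/cred.h>
#include <linux/err.h>

static struct key *example_make_key(struct key_type *example_type,
                                    struct key *dest_keyring)
{
        const struct cred *cred = current_cred();
        struct key *key;
        int ret;

        key = key_alloc(example_type, "example:desc",
                        cred->fsuid, cred->fsgid, cred,
                        KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
                        KEY_ALLOC_IN_QUOTA);
        if (IS_ERR(key))
                return key;             /* e.g. -EDQUOT or -EACCES */

        /* the key is uninstantiated until it is given a payload */
        ret = key_instantiate_and_link(key, "payload", 7, dest_keyring, NULL);
        if (ret < 0) {
                key_put(key);
                return ERR_PTR(ret);
        }
        return key;
}
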
@@ -344,14 +356,19 @@
 	key_user_put(user);
 	key = ERR_PTR(-EDQUOT);
 	goto error;
-
-} /* end key_alloc() */
-
+}
 EXPORT_SYMBOL(key_alloc);
 
-/*****************************************************************************/
-/*
- * reserve an amount of quota for the key's payload
+/**
+ * key_payload_reserve - Adjust data quota reservation for the key's payload
+ * @key: The key to make the reservation for.
+ * @datalen: The amount of data payload the caller now wants.
+ *
+ * Adjust the amount of the owning user's key data quota that a key reserves.
+ * If the amount is increased, then -EDQUOT may be returned if there isn't
+ * enough free quota available.
+ *
+ * If successful, 0 is returned.
  */
 int key_payload_reserve(struct key *key, size_t datalen)
 {
@@ -384,22 +401,21 @@
 		key->datalen = datalen;
 
 	return ret;
-
-} /* end key_payload_reserve() */
-
+}
 EXPORT_SYMBOL(key_payload_reserve);
 
-/*****************************************************************************/
 /*
- * instantiate a key and link it into the target keyring atomically
- * - called with the target keyring's semaphore writelocked
+ * Instantiate a key and link it into the target keyring atomically.  Must be
+ * called with the target keyring's semaphore writelocked.  The target key's
+ * semaphore need not be locked as instantiation is serialised by
+ * key_construction_mutex.
  */
 static int __key_instantiate_and_link(struct key *key,
 				      const void *data,
 				      size_t datalen,
 				      struct key *keyring,
 				      struct key *authkey,
-				      struct keyring_list **_prealloc)
+				      unsigned long *_prealloc)
 {
 	int ret, awaken;
 
@@ -441,12 +457,23 @@
 		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
 
 	return ret;
+}
 
-} /* end __key_instantiate_and_link() */
-
-/*****************************************************************************/
-/*
- * instantiate a key and link it into the target keyring atomically
+/**
+ * key_instantiate_and_link - Instantiate a key and link it into the keyring.
+ * @key: The key to instantiate.
+ * @data: The data to use to instantiate the key.
+ * @datalen: The length of @data.
+ * @keyring: Keyring to create a link in on success (or NULL).
+ * @authkey: The authorisation token permitting instantiation.
+ *
+ * Instantiate a key that's in the uninstantiated state using the provided data
+ * and, if successful, link it in to the destination keyring if one is
+ * supplied.
+ *
+ * If successful, 0 is returned, the authorisation token is revoked and anyone
+ * waiting for the key is woken up.  If the key was already instantiated,
+ * -EBUSY will be returned.
  */
 int key_instantiate_and_link(struct key *key,
 			     const void *data,
@@ -454,7 +481,7 @@
 			     struct key *keyring,
 			     struct key *authkey)
 {
-	struct keyring_list *prealloc;
+	unsigned long prealloc;
 	int ret;
 
 	if (keyring) {
@@ -471,21 +498,35 @@
 		__key_link_end(keyring, key->type, prealloc);
 
 	return ret;
-
-} /* end key_instantiate_and_link() */
+}
 
 EXPORT_SYMBOL(key_instantiate_and_link);
 
-/*****************************************************************************/
-/*
- * negatively instantiate a key and link it into the target keyring atomically
+/**
+ * key_negate_and_link - Negatively instantiate a key and link it into the keyring.
+ * @key: The key to instantiate.
+ * @timeout: The timeout on the negative key.
+ * @keyring: Keyring to create a link in on success (or NULL).
+ * @authkey: The authorisation token permitting instantiation.
+ *
+ * Negatively instantiate a key that's in the uninstantiated state and, if
+ * successful, set its timeout and link it in to the destination keyring if one
+ * is supplied.  The key and any links to the key will be automatically garbage
+ * collected after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return -ENOKEY until the negative key expires.
+ *
+ * If successful, 0 is returned, the authorisation token is revoked and anyone
+ * waiting for the key is woken up.  If the key was already instantiated,
+ * -EBUSY will be returned.
  */
 int key_negate_and_link(struct key *key,
 			unsigned timeout,
 			struct key *keyring,
 			struct key *authkey)
 {
-	struct keyring_list *prealloc;
+	unsigned long prealloc;
 	struct timespec now;
 	int ret, awaken, link_ret = 0;
 
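
For context, a hedged sketch of the negative-caching pattern described above, as it might appear in an upcall-failure path. The function name, the 60-second timeout and the surrounding variables are all illustrative.

#include <linux/key.h>
#include <linux/kernel.h>

static void example_cache_failure(struct key *key, struct key *dest_keyring,
                                  struct key *authkey)
{
        int ret;

        /* park a negative result so request_key() returns -ENOKEY for 60s
         * instead of re-running the upcall each time */
        ret = key_negate_and_link(key, 60, dest_keyring, authkey);
        if (ret < 0 && ret != -EBUSY)
                pr_debug("negative instantiation failed: %d\n", ret);
}
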
@@ -535,22 +576,23 @@
 		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
 
 	return ret == 0 ? link_ret : ret;
-
-} /* end key_negate_and_link() */
+}
 
 EXPORT_SYMBOL(key_negate_and_link);
 
-/*****************************************************************************/
 /*
- * do cleaning up in process context so that we don't have to disable
- * interrupts all over the place
+ * Garbage collect keys in process context so that we don't have to disable
+ * interrupts all over the place.
+ *
+ * key_put() schedules this rather than trying to do the cleanup itself, which
+ * means key_put() doesn't have to sleep.
  */
 static void key_cleanup(struct work_struct *work)
 {
 	struct rb_node *_n;
 	struct key *key;
 
- go_again:
+go_again:
 	/* look for a dead key in the tree */
 	spin_lock(&key_serial_lock);
 
@@ -564,7 +606,7 @@
 	spin_unlock(&key_serial_lock);
 	return;
 
- found_dead_key:
+found_dead_key:
 	/* we found a dead key - once we've removed it from the tree, we can
 	 * drop the lock */
 	rb_erase(&key->serial_node, &key_serial_tree);
@@ -601,14 +643,15 @@
 
 	/* there may, of course, be more than one key to destroy */
 	goto go_again;
+}
 
-} /* end key_cleanup() */
-
-/*****************************************************************************/
-/*
- * dispose of a reference to a key
- * - when all the references are gone, we schedule the cleanup task to come and
- *   pull it out of the tree in definite process context
+/**
+ * key_put - Discard a reference to a key.
+ * @key: The key to discard a reference from.
+ *
+ * Discard a reference to a key, and when all the references are gone, we
+ * schedule the cleanup task to come and pull it out of the tree in process
+ * context at some later time.
  */
 void key_put(struct key *key)
 {
@@ -618,14 +661,11 @@
 		if (atomic_dec_and_test(&key->usage))
 			schedule_work(&key_cleanup_task);
 	}
-
-} /* end key_put() */
-
+}
 EXPORT_SYMBOL(key_put);
 
-/*****************************************************************************/
 /*
- * find a key by its serial number
+ * Find a key by its serial number.
  */
 struct key *key_lookup(key_serial_t id)
 {
@@ -647,11 +687,11 @@
 			goto found;
 	}
 
- not_found:
+not_found:
 	key = ERR_PTR(-ENOKEY);
 	goto error;
 
- found:
+found:
 	/* pretend it doesn't exist if it is awaiting deletion */
 	if (atomic_read(&key->usage) == 0)
 		goto not_found;
@@ -661,16 +701,16 @@
 	 */
 	atomic_inc(&key->usage);
 
- error:
+error:
 	spin_unlock(&key_serial_lock);
 	return key;
+}
 
-} /* end key_lookup() */
-
-/*****************************************************************************/
 /*
- * find and lock the specified key type against removal
- * - we return with the sem readlocked
+ * Find and lock the specified key type against removal.
+ *
+ * We return with the sem read-locked if successful.  If the type wasn't
+ * available -ENOKEY is returned instead.
  */
 struct key_type *key_type_lookup(const char *type)
 {
@@ -688,26 +728,23 @@
 	up_read(&key_types_sem);
 	ktype = ERR_PTR(-ENOKEY);
 
- found_kernel_type:
+found_kernel_type:
 	return ktype;
+}
 
-} /* end key_type_lookup() */
-
-/*****************************************************************************/
 /*
- * unlock a key type
+ * Unlock a key type locked by key_type_lookup().
  */
 void key_type_put(struct key_type *ktype)
 {
 	up_read(&key_types_sem);
+}
 
-} /* end key_type_put() */
-
-/*****************************************************************************/
 /*
- * attempt to update an existing key
- * - the key has an incremented refcount
- * - we need to put the key if we get an error
+ * Attempt to update an existing key.
+ *
+ * The key is given to us with an incremented refcount that we need to discard
+ * if we get an error.
  */
 static inline key_ref_t __key_update(key_ref_t key_ref,
 				     const void *payload, size_t plen)
@@ -742,13 +779,32 @@
 	key_put(key);
 	key_ref = ERR_PTR(ret);
 	goto out;
+}
 
-} /* end __key_update() */
-
-/*****************************************************************************/
-/*
- * search the specified keyring for a key of the same description; if one is
- * found, update it, otherwise add a new one
+/**
+ * key_create_or_update - Update or create and instantiate a key.
+ * @keyring_ref: A pointer to the destination keyring with possession flag.
+ * @type: The type of key.
+ * @description: The searchable description for the key.
+ * @payload: The data to use to instantiate or update the key.
+ * @plen: The length of @payload.
+ * @perm: The permissions mask for a new key.
+ * @flags: The quota flags for a new key.
+ *
+ * Search the destination keyring for a key of the same description and if one
+ * is found, update it, otherwise create and instantiate a new one and create a
+ * link to it from that keyring.
+ *
+ * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
+ * concocted.
+ *
+ * Returns a pointer to the new key if successful, -ENODEV if the key type
+ * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
+ * caller isn't permitted to modify the keyring or the LSM did not permit
+ * creation of the key.
+ *
+ * On success, the possession flag from the keyring ref will be tacked on to
+ * the key ref before it is returned.
  */
 key_ref_t key_create_or_update(key_ref_t keyring_ref,
 			       const char *type,
@@ -758,7 +814,7 @@
 			       key_perm_t perm,
 			       unsigned long flags)
 {
-	struct keyring_list *prealloc;
+	unsigned long prealloc;
 	const struct cred *cred = current_cred();
 	struct key_type *ktype;
 	struct key *keyring, *key = NULL;
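
A hedged sketch of a caller that follows the contract above, storing (or refreshing) a "user"-type key in a keyring it possesses. example_store_token() and the description are illustrative, and KEY_ALLOC_IN_QUOTA is assumed from <linux/key.h>.

#include <linux/key.h>
#include <linux/err.h>

static int example_store_token(struct key *dest_keyring,
                               const void *payload, size_t plen)
{
        key_ref_t kref;

        kref = key_create_or_update(make_key_ref(dest_keyring, 1),
                                    "user", "example:token",
                                    payload, plen,
                                    KEY_PERM_UNDEF,     /* let the core pick a mask */
                                    KEY_ALLOC_IN_QUOTA);
        if (IS_ERR(kref))
                return PTR_ERR(kref);

        key_ref_put(kref);
        return 0;
}
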
@@ -855,14 +911,21 @@
 
 	key_ref = __key_update(key_ref, payload, plen);
 	goto error;
-
-} /* end key_create_or_update() */
-
+}
 EXPORT_SYMBOL(key_create_or_update);
 
-/*****************************************************************************/
-/*
- * update a key
+/**
+ * key_update - Update a key's contents.
+ * @key_ref: The pointer (plus possession flag) to the key.
+ * @payload: The data to be used to update the key.
+ * @plen: The length of @payload.
+ *
+ * Attempt to update the contents of a key with the given payload data.  The
+ * caller must be granted Write permission on the key.  Negative keys can be
+ * instantiated by this method.
+ *
+ * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
+ * type does not support updating.  The key type may return other errors.
  */
 int key_update(key_ref_t key_ref, const void *payload, size_t plen)
 {
@@ -891,14 +954,17 @@
 
  error:
 	return ret;
-
-} /* end key_update() */
-
+}
 EXPORT_SYMBOL(key_update);
 
-/*****************************************************************************/
-/*
- * revoke a key
+/**
+ * key_revoke - Revoke a key.
+ * @key: The key to be revoked.
+ *
+ * Mark a key as being revoked and ask the type to free up its resources.  The
+ * revocation timeout is set and the key and all its links will be
+ * automatically garbage collected after key_gc_delay amount of time if they
+ * are not manually dealt with first.
  */
 void key_revoke(struct key *key)
 {
@@ -926,14 +992,16 @@
 	}
 
 	up_write(&key->sem);
-
-} /* end key_revoke() */
-
+}
 EXPORT_SYMBOL(key_revoke);
 
-/*****************************************************************************/
-/*
- * register a type of key
+/**
+ * register_key_type - Register a type of key.
+ * @ktype: The new key type.
+ *
+ * Register a new key type.
+ *
+ * Returns 0 on success or -EEXIST if a type of this name already exists.
  */
 int register_key_type(struct key_type *ktype)
 {
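
To show the registration lifecycle documented above, here is a hedged sketch of a minimal module-provided key type. The "example" name, both operations and the 64-byte limit are illustrative, and the ->instantiate() signature is the 2.6.38-era one.

#include <linux/key-type.h>
#include <linux/module.h>

static int example_instantiate(struct key *key, const void *data, size_t datalen)
{
        return datalen <= 64 ? 0 : -EINVAL;     /* accept small payloads only */
}

static void example_destroy(struct key *key)
{
        /* nothing allocated, nothing to free */
}

static struct key_type key_type_example = {
        .name        = "example",
        .instantiate = example_instantiate,
        .destroy     = example_destroy,
};

static int __init example_init(void)
{
        return register_key_type(&key_type_example); /* -EEXIST if the name clashes */
}

static void __exit example_exit(void)
{
        unregister_key_type(&key_type_example);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
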
@@ -953,17 +1021,19 @@
 	list_add(&ktype->link, &key_types_list);
 	ret = 0;
 
- out:
+out:
 	up_write(&key_types_sem);
 	return ret;
-
-} /* end register_key_type() */
-
+}
 EXPORT_SYMBOL(register_key_type);
 
-/*****************************************************************************/
-/*
- * unregister a type of key
+/**
+ * unregister_key_type - Unregister a type of key.
+ * @ktype: The key type.
+ *
+ * Unregister a key type and mark all the extant keys of this type as dead.
+ * Those keys of this type are then destroyed to get rid of their payloads and
+ * they and their links will be garbage collected as soon as possible.
  */
 void unregister_key_type(struct key_type *ktype)
 {
@@ -1010,14 +1080,11 @@
 	up_write(&key_types_sem);
 
 	key_schedule_gc(0);
-
-} /* end unregister_key_type() */
-
+}
 EXPORT_SYMBOL(unregister_key_type);
 
-/*****************************************************************************/
 /*
- * initialise the key management stuff
+ * Initialise the key management state.
  */
 void __init key_init(void)
 {
@@ -1037,5 +1104,4 @@
 
 	rb_insert_color(&root_key_user.node,
 			&key_user_tree);
-
-} /* end key_init() */
+}
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 60924f6..31a0fd8 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1,4 +1,4 @@
-/* keyctl.c: userspace keyctl operations
+/* Userspace key control operations
  *
  * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -31,28 +31,24 @@
 	int ret;
 
 	ret = strncpy_from_user(type, _type, len);
-
 	if (ret < 0)
 		return ret;
-
 	if (ret == 0 || ret >= len)
 		return -EINVAL;
-
 	if (type[0] == '.')
 		return -EPERM;
-
 	type[len - 1] = '\0';
-
 	return 0;
 }
 
-/*****************************************************************************/
 /*
- * extract the description of a new key from userspace and either add it as a
- * new key to the specified keyring or update a matching key in that keyring
- * - the keyring must be writable
- * - returns the new key's serial number
- * - implements add_key()
+ * Extract the description of a new key from userspace and either add it as a
+ * new key to the specified keyring or update a matching key in that keyring.
+ *
+ * The keyring must be writable so that we can attach the key to it.
+ *
+ * If successful, the new key's serial number is returned, otherwise an error
+ * code is returned.
  */
 SYSCALL_DEFINE5(add_key, const char __user *, _type,
 		const char __user *, _description,
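
From userspace the same operation is reachable through the add_key(2) wrapper provided by libkeyutils; a hedged sketch in which the "user" type, description and payload are illustrative:

/* build with -lkeyutils */
#include <keyutils.h>
#include <stdio.h>

int main(void)
{
        key_serial_t id;

        /* add (or update) a "user" key in the session keyring */
        id = add_key("user", "example:token", "secret", 6,
                     KEY_SPEC_SESSION_KEYRING);
        if (id < 0) {
                perror("add_key");
                return 1;
        }
        printf("key id: %d\n", id);
        return 0;
}
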
@@ -132,19 +128,20 @@
 	kfree(description);
  error:
 	return ret;
+}
 
-} /* end sys_add_key() */
-
-/*****************************************************************************/
 /*
- * search the process keyrings for a matching key
- * - nested keyrings may also be searched if they have Search permission
- * - if a key is found, it will be attached to the destination keyring if
- *   there's one specified
- * - /sbin/request-key will be invoked if _callout_info is non-NULL
- *   - the _callout_info string will be passed to /sbin/request-key
- *   - if the _callout_info string is empty, it will be rendered as "-"
- * - implements request_key()
+ * Search the process keyrings and keyring trees linked from those for a
+ * matching key.  Keyrings must have appropriate Search permission to be
+ * searched.
+ *
+ * If a key is found, it will be attached to the destination keyring if there's
+ * one specified and the serial number of the key will be returned.
+ *
+ * If no key is found, /sbin/request-key will be invoked if _callout_info is
+ * non-NULL in an attempt to create a key.  The _callout_info string will be
+ * passed to /sbin/request-key to aid with completing the request.  If the
+ * _callout_info string is "" then it will be changed to "-".
  */
 SYSCALL_DEFINE4(request_key, const char __user *, _type,
 		const char __user *, _description,
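
And the matching userspace view via libkeyutils' request_key(3) wrapper; a hedged sketch in which the type, description and callout string are illustrative:

/* build with -lkeyutils */
#include <keyutils.h>
#include <stdio.h>

int main(void)
{
        key_serial_t id;

        id = request_key("user", "example:token", "debug:1",
                         KEY_SPEC_SESSION_KEYRING);
        if (id < 0) {
                perror("request_key");  /* ENOKEY if the upcall failed */
                return 1;
        }
        printf("found/created key %d\n", id);
        return 0;
}
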
@@ -222,14 +219,14 @@
 	kfree(description);
 error:
 	return ret;
+}
 
-} /* end sys_request_key() */
-
-/*****************************************************************************/
 /*
- * get the ID of the specified process keyring
- * - the keyring must have search permission to be found
- * - implements keyctl(KEYCTL_GET_KEYRING_ID)
+ * Get the ID of the specified process keyring.
+ *
+ * The requested keyring must have search permission to be found.
+ *
+ * If successful, the ID of the requested keyring will be returned.
  */
 long keyctl_get_keyring_ID(key_serial_t id, int create)
 {
@@ -248,13 +245,17 @@
 	key_ref_put(key_ref);
 error:
 	return ret;
+}
 
-} /* end keyctl_get_keyring_ID() */
-
-/*****************************************************************************/
 /*
- * join the session keyring
- * - implements keyctl(KEYCTL_JOIN_SESSION_KEYRING)
+ * Join a (named) session keyring.
+ *
+ * Create and join an anonymous session keyring or join a named session
+ * keyring, creating it if necessary.  A named session keyring must have Search
+ * permission for it to be joined.  Session keyrings without this permit will
+ * be skipped over.
+ *
+ * If successful, the ID of the joined session keyring will be returned.
  */
 long keyctl_join_session_keyring(const char __user *_name)
 {
@@ -277,14 +278,17 @@
 
 error:
 	return ret;
+}
 
-} /* end keyctl_join_session_keyring() */
-
-/*****************************************************************************/
 /*
- * update a key's data payload
- * - the key must be writable
- * - implements keyctl(KEYCTL_UPDATE)
+ * Update a key's data payload from the given data.
+ *
+ * The key must grant the caller Write permission and the key type must support
+ * updating for this to work.  A negative key can be positively instantiated
+ * with this call.
+ *
+ * If successful, 0 will be returned.  If the key type does not support
+ * updating, then -EOPNOTSUPP will be returned.
  */
 long keyctl_update_key(key_serial_t id,
 		       const void __user *_payload,
@@ -326,14 +330,17 @@
 	kfree(payload);
 error:
 	return ret;
+}
 
-} /* end keyctl_update_key() */
-
-/*****************************************************************************/
 /*
- * revoke a key
- * - the key must be writable
- * - implements keyctl(KEYCTL_REVOKE)
+ * Revoke a key.
+ *
+ * The key must grant the caller Write or Setattr permission for this to
+ * work.  The key type should give up its quota claim when revoked.  The key
+ * and any links to the key will be automatically garbage collected after a
+ * certain amount of time (/proc/sys/kernel/keys/gc_delay).
+ *
+ * If successful, 0 is returned.
  */
 long keyctl_revoke_key(key_serial_t id)
 {
@@ -358,14 +365,14 @@
 	key_ref_put(key_ref);
 error:
 	return ret;
+}
 
-} /* end keyctl_revoke_key() */
-
-/*****************************************************************************/
 /*
- * clear the specified process keyring
- * - the keyring must be writable
- * - implements keyctl(KEYCTL_CLEAR)
+ * Clear the specified keyring, creating an empty process keyring if one of the
+ * special keyring IDs is used.
+ *
+ * The keyring must grant the caller Write permission for this to work.  If
+ * successful, 0 will be returned.
  */
 long keyctl_keyring_clear(key_serial_t ringid)
 {
@@ -383,15 +390,18 @@
 	key_ref_put(keyring_ref);
 error:
 	return ret;
+}
 
-} /* end keyctl_keyring_clear() */
-
-/*****************************************************************************/
 /*
- * link a key into a keyring
- * - the keyring must be writable
- * - the key must be linkable
- * - implements keyctl(KEYCTL_LINK)
+ * Create a link from a keyring to a key if there's no matching key in the
+ * keyring, otherwise replace the link to the matching key with a link to the
+ * new key.
+ *
+ * The key must grant the caller Link permission and the keyring must grant
+ * the caller Write permission.  Furthermore, if an additional link is created,
+ * the keyring's quota will be extended.
+ *
+ * If successful, 0 will be returned.
  */
 long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
 {
@@ -417,15 +427,16 @@
 	key_ref_put(keyring_ref);
 error:
 	return ret;
+}
 
-} /* end keyctl_keyring_link() */
-
-/*****************************************************************************/
 /*
- * unlink the first attachment of a key from a keyring
- * - the keyring must be writable
- * - we don't need any permissions on the key
- * - implements keyctl(KEYCTL_UNLINK)
+ * Unlink a key from a keyring.
+ *
+ * The keyring must grant the caller Write permission for this to work; the key
+ * itself need not grant the caller anything.  If the last link to a key is
+ * removed then that key will be scheduled for destruction.
+ *
+ * If successful, 0 will be returned.
  */
 long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
 {
@@ -451,19 +462,20 @@
 	key_ref_put(keyring_ref);
 error:
 	return ret;
+}
 
-} /* end keyctl_keyring_unlink() */
-
-/*****************************************************************************/
 /*
- * describe a user key
- * - the key must have view permission
- * - if there's a buffer, we place up to buflen bytes of data into it
- * - unless there's an error, we return the amount of description available,
- *   irrespective of how much we may have copied
- * - the description is formatted thus:
+ * Return a description of a key to userspace.
+ *
+ * The key must grant the caller View permission for this to work.
+ *
+ * If there's a buffer, we place up to buflen bytes of data into it formatted
+ * in the following way:
+ *
  *	type;uid;gid;perm;description<NUL>
- * - implements keyctl(KEYCTL_DESCRIBE)
+ *
+ * If successful, we return the amount of description available, irrespective
+ * of how much we may have copied into the buffer.
  */
 long keyctl_describe_key(key_serial_t keyid,
 			 char __user *buffer,
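
A hedged userspace sketch that fetches the "type;uid;gid;perm;description" string documented above, assuming libkeyutils' keyctl_describe_alloc() helper:

/* build with -lkeyutils */
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        char *desc;

        if (keyctl_describe_alloc(KEY_SPEC_SESSION_KEYRING, &desc) < 0) {
                perror("keyctl_describe_alloc");
                return 1;
        }
        printf("%s\n", desc);   /* e.g. keyring;1000;1000;...;_ses */
        free(desc);
        return 0;
}
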
@@ -531,18 +543,17 @@
 	key_ref_put(key_ref);
 error:
 	return ret;
+}
 
-} /* end keyctl_describe_key() */
-
-/*****************************************************************************/
 /*
- * search the specified keyring for a matching key
- * - the start keyring must be searchable
- * - nested keyrings may also be searched if they are searchable
- * - only keys with search permission may be found
- * - if a key is found, it will be attached to the destination keyring if
- *   there's one specified
- * - implements keyctl(KEYCTL_SEARCH)
+ * Search the specified keyring and any keyrings it links to for a matching
+ * key.  Only keyrings that grant the caller Search permission will be searched
+ * (this includes the starting keyring).  Only keys with Search permission can
+ * be found.
+ *
+ * If successful, the found key will be linked to the destination keyring if
+ * supplied and the key has Link permission, and the found key ID will be
+ * returned.
  */
 long keyctl_keyring_search(key_serial_t ringid,
 			   const char __user *_type,
@@ -626,18 +637,17 @@
 	kfree(description);
 error:
 	return ret;
+}
 
-} /* end keyctl_keyring_search() */
-
-/*****************************************************************************/
 /*
- * read a user key's payload
- * - the keyring must be readable or the key must be searchable from the
- *   process's keyrings
- * - if there's a buffer, we place up to buflen bytes of data into it
- * - unless there's an error, we return the amount of data in the key,
- *   irrespective of how much we may have copied
- * - implements keyctl(KEYCTL_READ)
+ * Read a key's payload.
+ *
+ * The key must either grant the caller Read permission, or it must grant the
+ * caller Search permission when searched for from the process keyrings.
+ *
+ * If successful, we place up to buflen bytes of data into the buffer, if one
+ * is provided, and return the amount of data that is available in the key,
+ * irrespective of how much we copied into the buffer.
  */
 long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
 {
@@ -688,15 +698,22 @@
 	key_put(key);
 error:
 	return ret;
+}
 
-} /* end keyctl_read_key() */
-
-/*****************************************************************************/
 /*
- * change the ownership of a key
- * - the keyring owned by the changer
- * - if the uid or gid is -1, then that parameter is not changed
- * - implements keyctl(KEYCTL_CHOWN)
+ * Change the ownership of a key
+ *
+ * The key must grant the caller Setattr permission for this to work, though
+ * the key need not be fully instantiated yet.  For the UID to be changed, or
+ * for the GID to be changed to a group the caller is not a member of, the
+ * caller must have sysadmin capability.  If either uid or gid is -1 then that
+ * attribute is not changed.
+ *
+ * If the UID is to be changed, the new user must have sufficient quota to
+ * accept the key.  The quota deduction will be removed from the old user to
+ * the new user should the attribute be changed.
+ *
+ * If successful, 0 will be returned.
  */
 long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
 {
@@ -796,14 +813,14 @@
 	zapowner = newowner;
 	ret = -EDQUOT;
 	goto error_put;
+}
 
-} /* end keyctl_chown_key() */
-
-/*****************************************************************************/
 /*
- * change the permission mask on a key
- * - the keyring owned by the changer
- * - implements keyctl(KEYCTL_SETPERM)
+ * Change the permission mask on a key.
+ *
+ * The key must grant the caller Setattr permission for this to work, though
+ * the key need not be fully instantiated yet.  If the caller does not have
+ * sysadmin capability, it may only change the permission on keys that it owns.
  */
 long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
 {
@@ -838,11 +855,11 @@
 	key_put(key);
 error:
 	return ret;
-
-} /* end keyctl_setperm_key() */
+}
 
 /*
- * get the destination keyring for instantiation
+ * Get the destination keyring for instantiation and check that the caller has
+ * Write permission on it.
  */
 static long get_instantiation_keyring(key_serial_t ringid,
 				      struct request_key_auth *rka,
@@ -879,7 +896,7 @@
 }
 
 /*
- * change the request_key authorisation key on the current process
+ * Change the request_key authorisation key on the current process.
  */
 static int keyctl_change_reqkey_auth(struct key *key)
 {
@@ -895,10 +912,14 @@
 	return commit_creds(new);
 }
 
-/*****************************************************************************/
 /*
- * instantiate the key with the specified payload, and, if one is given, link
- * the key into the keyring
+ * Instantiate a key with the specified payload and link the key into the
+ * destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority).  No other permissions are required.
+ *
+ * If successful, 0 will be returned.
  */
 long keyctl_instantiate_key(key_serial_t id,
 			    const void __user *_payload,
@@ -973,13 +994,22 @@
 		vfree(payload);
 error:
 	return ret;
+}
 
-} /* end keyctl_instantiate_key() */
-
-/*****************************************************************************/
 /*
- * negatively instantiate the key with the given timeout (in seconds), and, if
- * one is given, link the key into the keyring
+ * Negatively instantiate the key with the given timeout (in seconds) and link
+ * the key into the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority).  No other permissions are required.
+ *
+ * The key and any links to the key will be automatically garbage collected
+ * after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return -ENOKEY until the negative key expires.
+ *
+ * If successful, 0 will be returned.
  */
 long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
 {
@@ -1020,13 +1050,14 @@
 
 error:
 	return ret;
+}
 
-} /* end keyctl_negate_key() */
-
-/*****************************************************************************/
 /*
- * set the default keyring in which request_key() will cache keys
- * - return the old setting
+ * Read or set the default keyring in which request_key() will cache keys.
+ *
+ * If a process keyring is specified then this will be created if it doesn't
+ * yet exist.  The old setting will be returned if successful.
  */
 long keyctl_set_reqkey_keyring(int reqkey_defl)
 {
@@ -1079,12 +1110,19 @@
 error:
 	abort_creds(new);
 	return ret;
+}
 
-} /* end keyctl_set_reqkey_keyring() */
-
-/*****************************************************************************/
 /*
- * set or clear the timeout for a key
+ * Set or clear the timeout on a key.
+ *
+ * Either the key must grant the caller Setattr permission or else the caller
+ * must hold an instantiation authorisation token for the key.
+ *
+ * The timeout is either 0 to clear the timeout, or a number of seconds from
+ * the current time.  The key and any links to the key will be automatically
+ * garbage collected after the timeout expires.
+ *
+ * If successful, 0 is returned.
  */
 long keyctl_set_timeout(key_serial_t id, unsigned timeout)
 {
@@ -1136,12 +1174,24 @@
 	ret = 0;
 error:
 	return ret;
+}
 
-} /* end keyctl_set_timeout() */
-
-/*****************************************************************************/
 /*
- * assume the authority to instantiate the specified key
+ * Assume (or clear) the authority to instantiate the specified key.
+ *
+ * This sets the authoritative token currently in force for key instantiation.
+ * This must be done for a key to be instantiated.  It has the effect of making
+ * available all the keys from the caller of the request_key() that created a
+ * key to request_key() calls made by the caller of this function.
+ *
+ * The caller must have the instantiation key in their process keyrings with a
+ * Search permission grant available to the caller.
+ *
+ * If the ID given is 0, then the setting will be cleared and 0 returned.
+ *
+ * If the ID given matches an authorisation key, then that key will be
+ * set and its ID will be returned.  The authorisation key can be read to get
+ * the callout information passed to request_key().
  */
 long keyctl_assume_authority(key_serial_t id)
 {
@@ -1178,16 +1228,17 @@
 	ret = authkey->serial;
 error:
 	return ret;
-
-} /* end keyctl_assume_authority() */
+}
 
 /*
- * get the security label of a key
- * - the key must grant us view permission
- * - if there's a buffer, we place up to buflen bytes of data into it
- * - unless there's an error, we return the amount of information available,
- *   irrespective of how much we may have copied (including the terminal NUL)
- * - implements keyctl(KEYCTL_GET_SECURITY)
+ * Get the LSM security label of a key.
+ *
+ * The key must grant the caller View permission for this to work.
+ *
+ * If there's a buffer, then up to buflen bytes of data will be placed into it.
+ *
+ * If successful, the amount of information available will be returned,
+ * irrespective of how much was copied (including the terminal NUL).
  */
 long keyctl_get_security(key_serial_t keyid,
 			 char __user *buffer,
@@ -1242,10 +1293,16 @@
 }
 
 /*
- * attempt to install the calling process's session keyring on the process's
- * parent process
- * - the keyring must exist and must grant us LINK permission
- * - implements keyctl(KEYCTL_SESSION_TO_PARENT)
+ * Attempt to install the calling process's session keyring on the process's
+ * parent process.
+ *
+ * The keyring must exist and must grant the caller LINK permission, and the
+ * parent process must be single-threaded and must have the same effective
+ * ownership as this process and mustn't be SUID/SGID.
+ *
+ * The keyring will be emplaced on the parent when it next resumes userspace.
+ *
+ * If successful, 0 will be returned.
  */
 long keyctl_session_to_parent(void)
 {
@@ -1348,9 +1405,8 @@
 #endif /* !TIF_NOTIFY_RESUME */
 }
 
-/*****************************************************************************/
 /*
- * the key control system call
+ * The key control system call
  */
 SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		unsigned long, arg4, unsigned long, arg5)
@@ -1439,5 +1495,4 @@
 	default:
 		return -EOPNOTSUPP;
 	}
-
-} /* end sys_keyctl() */
+}
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index d37f713..5620f08 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -25,14 +25,16 @@
 		(keyring)->payload.subscriptions,			\
 		rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem)))
 
+#define KEY_LINK_FIXQUOTA 1UL
+
 /*
- * when plumbing the depths of the key tree, this sets a hard limit set on how
- * deep we're willing to go
+ * When plumbing the depths of the key tree, this sets a hard limit on how
+ * deep we're willing to go.
  */
 #define KEYRING_SEARCH_MAX_DEPTH 6
 
 /*
- * we keep all named keyrings in a hash to speed looking them up
+ * We keep all named keyrings in a hash to speed looking them up.
  */
 #define KEYRING_NAME_HASH_SIZE	(1 << 5)
 
@@ -50,7 +52,9 @@
 }
 
 /*
- * the keyring type definition
+ * The keyring key type definition.  Keyrings are simply keys of this type and
+ * can be treated as ordinary keys in addition to having their own special
+ * operations.
  */
 static int keyring_instantiate(struct key *keyring,
 			       const void *data, size_t datalen);
@@ -71,19 +75,17 @@
 	.describe	= keyring_describe,
 	.read		= keyring_read,
 };
-
 EXPORT_SYMBOL(key_type_keyring);
 
 /*
- * semaphore to serialise link/link calls to prevent two link calls in parallel
- * introducing a cycle
+ * Semaphore to serialise link/link calls to prevent two link calls in parallel
+ * introducing a cycle.
  */
 static DECLARE_RWSEM(keyring_serialise_link_sem);
 
-/*****************************************************************************/
 /*
- * publish the name of a keyring so that it can be found by name (if it has
- * one)
+ * Publish the name of a keyring so that it can be found by name (if it has
+ * one).
  */
 static void keyring_publish_name(struct key *keyring)
 {
@@ -102,13 +104,12 @@
 
 		write_unlock(&keyring_name_lock);
 	}
+}
 
-} /* end keyring_publish_name() */
-
-/*****************************************************************************/
 /*
- * initialise a keyring
- * - we object if we were given any data
+ * Initialise a keyring.
+ *
+ * Returns 0 on success, -EINVAL if given any data.
  */
 static int keyring_instantiate(struct key *keyring,
 			       const void *data, size_t datalen)
@@ -123,23 +124,20 @@
 	}
 
 	return ret;
+}
 
-} /* end keyring_instantiate() */
-
-/*****************************************************************************/
 /*
- * match keyrings on their name
+ * Match keyrings on their name
  */
 static int keyring_match(const struct key *keyring, const void *description)
 {
 	return keyring->description &&
 		strcmp(keyring->description, description) == 0;
+}
 
-} /* end keyring_match() */
-
-/*****************************************************************************/
 /*
- * dispose of the data dangling from the corpse of a keyring
+ * Clean up a keyring when it is destroyed.  Unpublish its name if it had one
+ * and dispose of its data.
  */
 static void keyring_destroy(struct key *keyring)
 {
@@ -164,12 +162,10 @@
 			key_put(klist->keys[loop]);
 		kfree(klist);
 	}
+}
 
-} /* end keyring_destroy() */
-
-/*****************************************************************************/
 /*
- * describe the keyring
+ * Describe a keyring for /proc.
  */
 static void keyring_describe(const struct key *keyring, struct seq_file *m)
 {
@@ -187,13 +183,12 @@
 	else
 		seq_puts(m, ": empty");
 	rcu_read_unlock();
+}
 
-} /* end keyring_describe() */
-
-/*****************************************************************************/
 /*
- * read a list of key IDs from the keyring's contents
- * - the keyring's semaphore is read-locked
+ * Read a list of key IDs from the keyring's contents in binary form.
+ *
+ * The keyring's semaphore is read-locked by the caller.
  */
 static long keyring_read(const struct key *keyring,
 			 char __user *buffer, size_t buflen)
@@ -241,12 +236,10 @@
 
 error:
 	return ret;
+}
 
-} /* end keyring_read() */
-
-/*****************************************************************************/
 /*
- * allocate a keyring and link into the destination keyring
+ * Allocate a keyring and link into the destination keyring.
  */
 struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
 			  const struct cred *cred, unsigned long flags,
@@ -269,20 +262,42 @@
 	}
 
 	return keyring;
+}
 
-} /* end keyring_alloc() */
-
-/*****************************************************************************/
-/*
- * search the supplied keyring tree for a key that matches the criterion
- * - perform a breadth-then-depth search up to the prescribed limit
- * - we only find keys on which we have search permission
- * - we use the supplied match function to see if the description (or other
- *   feature of interest) matches
- * - we rely on RCU to prevent the keyring lists from disappearing on us
- * - we return -EAGAIN if we didn't find any matching key
- * - we return -ENOKEY if we only found negative matching keys
- * - we propagate the possession attribute from the keyring ref to the key ref
+/**
+ * keyring_search_aux - Search a keyring tree for a key matching some criteria
+ * @keyring_ref: A pointer to the keyring with possession indicator.
+ * @cred: The credentials to use for permissions checks.
+ * @type: The type of key to search for.
+ * @description: Parameter for @match.
+ * @match: Function to rule on whether or not a key is the one required.
+ *
+ * Search the supplied keyring tree for a key that matches the criteria given.
+ * The root keyring and any linked keyrings must grant Search permission to the
+ * caller to be searchable and keys can only be found if they too grant Search
+ * to the caller. The possession flag on the root keyring pointer controls use
+ * of the possessor bits in permissions checking of the entire tree.  In
+ * addition, the LSM gets to forbid keyring searches and key matches.
+ *
+ * The search is performed as a breadth-then-depth search up to the prescribed
+ * limit (KEYRING_SEARCH_MAX_DEPTH).
+ *
+ * Keys are matched to the type provided and are then filtered by the match
+ * function, which is given the description to use in any way it sees fit.  The
+ * match function may use any attributes of a key that it wishes to
+ * determine the match.  Normally the match function from the key type would be
+ * used.
+ *
+ * RCU is used to prevent the keyring key lists from disappearing without the
+ * need to take lots of locks.
+ *
+ * Returns a pointer to the found key and increments the key usage count if
+ * successful; -EAGAIN if no matching keys were found, or if expired or revoked
+ * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
+ * specified keyring wasn't a keyring.
+ *
+ * In the case of a successful return, the possession attribute from
+ * @keyring_ref is propagated to the returned key reference.
  */
 key_ref_t keyring_search_aux(key_ref_t keyring_ref,
 			     const struct cred *cred,
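
A hedged kernel-side sketch of the simpler keyring_search() wrapper documented a little further below, looking up a "user" key by description. example_find_token() and the description are illustrative, and key_type_user is assumed from <keys/user-type.h>.

#include <linux/key.h>
#include <linux/err.h>
#include <keys/user-type.h>

static int example_find_token(struct key *keyring)
{
        key_ref_t kref;

        kref = keyring_search(make_key_ref(keyring, 1),
                              &key_type_user, "example:token");
        if (IS_ERR(kref))
                return PTR_ERR(kref);   /* -EAGAIN or -ENOKEY if absent */

        /* ... use key_ref_to_ptr(kref) ... */
        key_ref_put(kref);
        return 0;
}
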
@@ -444,17 +459,16 @@
 	rcu_read_unlock();
 error:
 	return key_ref;
+}
 
-} /* end keyring_search_aux() */
-
-/*****************************************************************************/
-/*
- * search the supplied keyring tree for a key that matches the criterion
- * - perform a breadth-then-depth search up to the prescribed limit
- * - we only find keys on which we have search permission
- * - we readlock the keyrings as we search down the tree
- * - we return -EAGAIN if we didn't find any matching key
- * - we return -ENOKEY if we only found negative matching keys
+/**
+ * keyring_search - Search the supplied keyring tree for a matching key
+ * @keyring: The root of the keyring tree to be searched.
+ * @type: The type of keyring we want to find.
+ * @description: The name of the keyring we want to find.
+ *
+ * As keyring_search_aux() above, but using the current task's credentials and
+ * type's default matching function.
  */
 key_ref_t keyring_search(key_ref_t keyring,
 			 struct key_type *type,
@@ -465,16 +479,23 @@
 
 	return keyring_search_aux(keyring, current->cred,
 				  type, description, type->match);
-
-} /* end keyring_search() */
-
+}
 EXPORT_SYMBOL(keyring_search);
 
-/*****************************************************************************/
 /*
- * search the given keyring only (no recursion)
- * - keyring must be locked by caller
- * - caller must guarantee that the keyring is a keyring
+ * Search the given keyring only (no recursion).
+ *
+ * The caller must guarantee that the keyring is a keyring and that the
+ * permission is granted to search the keyring as no check is made here.
+ *
+ * RCU is used to make it unnecessary to lock the keyring key list here.
+ *
+ * Returns a pointer to the found key with usage count incremented if
+ * successful and returns -ENOKEY if not found.  Revoked keys and keys not
+ * providing the requested permission are skipped over.
+ *
+ * If successful, the possession indicator is propagated from the keyring ref
+ * to the returned key reference.
  */
 key_ref_t __keyring_search_one(key_ref_t keyring_ref,
 			       const struct key_type *ktype,
@@ -514,14 +535,18 @@
 	atomic_inc(&key->usage);
 	rcu_read_unlock();
 	return make_key_ref(key, possessed);
+}
 
-} /* end __keyring_search_one() */
-
-/*****************************************************************************/
 /*
- * find a keyring with the specified name
- * - all named keyrings are searched
- * - normally only finds keyrings with search permission for the current process
+ * Find a keyring with the specified name.
+ *
+ * All named keyrings in the current user namespace are searched, provided they
+ * grant Search permission directly to the caller (unless this check is
+ * skipped).  Keyrings whose usage counts have reached zero or that have been
+ * revoked are skipped.
+ *
+ * Returns a pointer to the keyring with the keyring's refcount having been
+ * incremented on success.  -ENOKEY is returned if a key could not be found.
  */
 struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
 {
@@ -569,15 +594,14 @@
 out:
 	read_unlock(&keyring_name_lock);
 	return keyring;
+}
 
-} /* end find_keyring_by_name() */
-
-/*****************************************************************************/
 /*
- * see if a cycle will will be created by inserting acyclic tree B in acyclic
- * tree A at the topmost level (ie: as a direct child of A)
- * - since we are adding B to A at the top level, checking for cycles should
- *   just be a matter of seeing if node A is somewhere in tree B
+ * See if a cycle will be created by inserting acyclic tree B in acyclic
+ * tree A at the topmost level (ie: as a direct child of A).
+ *
+ * Since we are adding B to A at the top level, checking for cycles should just
+ * be a matter of seeing if node A is somewhere in tree B.
  */
 static int keyring_detect_cycle(struct key *A, struct key *B)
 {
@@ -657,11 +681,10 @@
 cycle_detected:
 	ret = -EDEADLK;
 	goto error;
-
-} /* end keyring_detect_cycle() */
+}
 
 /*
- * dispose of a keyring list after the RCU grace period, freeing the unlinked
+ * Dispose of a keyring list after the RCU grace period, freeing the unlinked
  * key
  */
 static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
@@ -675,14 +698,14 @@
 }
 
 /*
- * preallocate memory so that a key can be linked into to a keyring
+ * Preallocate memory so that a key can be linked into a keyring.
  */
 int __key_link_begin(struct key *keyring, const struct key_type *type,
-		     const char *description,
-		     struct keyring_list **_prealloc)
+		     const char *description, unsigned long *_prealloc)
 	__acquires(&keyring->sem)
 {
 	struct keyring_list *klist, *nklist;
+	unsigned long prealloc;
 	unsigned max;
 	size_t size;
 	int loop, ret;
@@ -725,6 +748,7 @@
 
 				/* note replacement slot */
 				klist->delkey = nklist->delkey = loop;
+				prealloc = (unsigned long)nklist;
 				goto done;
 			}
 		}
@@ -739,6 +763,7 @@
 	if (klist && klist->nkeys < klist->maxkeys) {
 		/* there's sufficient slack space to append directly */
 		nklist = NULL;
+		prealloc = KEY_LINK_FIXQUOTA;
 	} else {
 		/* grow the key list */
 		max = 4;
@@ -773,8 +798,9 @@
 		nklist->keys[nklist->delkey] = NULL;
 	}
 
+	prealloc = (unsigned long)nklist | KEY_LINK_FIXQUOTA;
 done:
-	*_prealloc = nklist;
+	*_prealloc = prealloc;
 	kleave(" = 0");
 	return 0;
 
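
The prealloc value built above is a tagged pointer: keyring_list allocations are at least word-aligned, so bit 0 is free to carry KEY_LINK_FIXQUOTA. A hedged sketch of the encode/decode pattern, using hypothetical helper names:

#include <linux/types.h>

#define EXAMPLE_FIXQUOTA 1UL

static unsigned long example_encode(struct keyring_list *nklist, bool fixquota)
{
        /* the pointer's low bit is known to be clear, so it can hold a flag */
        return (unsigned long)nklist | (fixquota ? EXAMPLE_FIXQUOTA : 0);
}

static struct keyring_list *example_decode(unsigned long prealloc, bool *fixquota)
{
        *fixquota = prealloc & EXAMPLE_FIXQUOTA;
        return (struct keyring_list *)(prealloc & ~EXAMPLE_FIXQUOTA);
}
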
@@ -792,10 +818,10 @@
 }
 
 /*
- * check already instantiated keys aren't going to be a problem
- * - the caller must have called __key_link_begin()
- * - don't need to call this for keys that were created since __key_link_begin()
- *   was called
+ * Check already instantiated keys aren't going to be a problem.
+ *
+ * The caller must have called __key_link_begin(). There is no need to call
+ * this for keys that were created since __key_link_begin() was called.
  */
 int __key_link_check_live_key(struct key *keyring, struct key *key)
 {
@@ -807,17 +833,20 @@
 }
 
 /*
- * link a key into to a keyring
- * - must be called with __key_link_begin() having being called
- * - discard already extant link to matching key if there is one
+ * Link a key into a keyring.
+ *
+ * Must be called with __key_link_begin() having been called.  Discards any
+ * already extant link to a matching key if there is one, so that each keyring
+ * holds at most one link to any given key of a particular type+description
+ * combination.
  */
 void __key_link(struct key *keyring, struct key *key,
-		struct keyring_list **_prealloc)
+		unsigned long *_prealloc)
 {
 	struct keyring_list *klist, *nklist;
 
-	nklist = *_prealloc;
-	*_prealloc = NULL;
+	nklist = (struct keyring_list *)(*_prealloc & ~KEY_LINK_FIXQUOTA);
+	*_prealloc = 0;
 
 	kenter("%d,%d,%p", keyring->serial, key->serial, nklist);
 
@@ -852,34 +881,54 @@
 }
 
 /*
- * finish linking a key into to a keyring
- * - must be called with __key_link_begin() having being called
+ * Finish linking a key into a keyring.
+ *
+ * Must be called with __key_link_begin() having been called.
  */
 void __key_link_end(struct key *keyring, struct key_type *type,
-		    struct keyring_list *prealloc)
+		    unsigned long prealloc)
 	__releases(&keyring->sem)
 {
 	BUG_ON(type == NULL);
 	BUG_ON(type->name == NULL);
-	kenter("%d,%s,%p", keyring->serial, type->name, prealloc);
+	kenter("%d,%s,%lx", keyring->serial, type->name, prealloc);
 
 	if (type == &key_type_keyring)
 		up_write(&keyring_serialise_link_sem);
 
 	if (prealloc) {
-		kfree(prealloc);
-		key_payload_reserve(keyring,
-				    keyring->datalen - KEYQUOTA_LINK_BYTES);
+		if (prealloc & KEY_LINK_FIXQUOTA)
+			key_payload_reserve(keyring,
+					    keyring->datalen -
+					    KEYQUOTA_LINK_BYTES);
+		kfree((struct keyring_list *)(prealloc & ~KEY_LINK_FIXQUOTA));
 	}
 	up_write(&keyring->sem);
 }
 
-/*
- * link a key to a keyring
+/**
+ * key_link - Link a key to a keyring
+ * @keyring: The keyring to make the link in.
+ * @key: The key to link to.
+ *
+ * Make a link in a keyring to a key, such that the keyring holds a reference
+ * on that key and the key can potentially be found by searching that keyring.
+ *
+ * This function will write-lock the keyring's semaphore and will consume some
+ * of the user's key data quota to hold the link.
+ *
+ * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring,
+ * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is
+ * full, -EDQUOT if there is insufficient key data quota remaining to add
+ * another link or -ENOMEM if there's insufficient memory.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be made (the keyring should have Write permission and the key Link
+ * permission).
  */
 int key_link(struct key *keyring, struct key *key)
 {
-	struct keyring_list *prealloc;
+	unsigned long prealloc;
 	int ret;
 
 	key_check(keyring);
@@ -895,12 +944,24 @@
 
 	return ret;
 }
-
 EXPORT_SYMBOL(key_link);
 
-/*****************************************************************************/
-/*
- * unlink the first link to a key from a keyring
+/**
+ * key_unlink - Unlink the first link to a key from a keyring.
+ * @keyring: The keyring to remove the link from.
+ * @key: The key the link is to.
+ *
+ * Remove a link from a keyring to a key.
+ *
+ * This function will write-lock the keyring's semaphore.
+ *
+ * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if
+ * the key isn't linked to by the keyring or -ENOMEM if there's insufficient
+ * memory.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be removed (the keyring should have Write permission; no permissions are
+ * required on the key).
  */
 int key_unlink(struct key *keyring, struct key *key)
 {
@@ -968,15 +1029,12 @@
 	ret = -ENOMEM;
 	up_write(&keyring->sem);
 	goto error;
-
-} /* end key_unlink() */
-
+}
 EXPORT_SYMBOL(key_unlink);
 
-/*****************************************************************************/
 /*
- * dispose of a keyring list after the RCU grace period, releasing the keys it
- * links to
+ * Dispose of a keyring list after the RCU grace period, releasing the keys it
+ * links to.
  */
 static void keyring_clear_rcu_disposal(struct rcu_head *rcu)
 {
@@ -989,13 +1047,15 @@
 		key_put(klist->keys[loop]);
 
 	kfree(klist);
+}
 
-} /* end keyring_clear_rcu_disposal() */
-
-/*****************************************************************************/
-/*
- * clear the specified process keyring
- * - implements keyctl(KEYCTL_CLEAR)
+/**
+ * keyring_clear - Clear a keyring
+ * @keyring: The keyring to clear.
+ *
+ * Clear the contents of the specified keyring.
+ *
+ * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring.
  */
 int keyring_clear(struct key *keyring)
 {
@@ -1027,15 +1087,13 @@
 	}
 
 	return ret;
-
-} /* end keyring_clear() */
-
+}
 EXPORT_SYMBOL(keyring_clear);
 
-/*****************************************************************************/
 /*
- * dispose of the links from a revoked keyring
- * - called with the key sem write-locked
+ * Dispose of the links from a revoked keyring.
+ *
+ * This is called with the key sem write-locked.
  */
 static void keyring_revoke(struct key *keyring)
 {
@@ -1050,11 +1108,10 @@
 		rcu_assign_pointer(keyring->payload.subscriptions, NULL);
 		call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
 	}
-
-} /* end keyring_revoke() */
+}
 
 /*
- * Determine whether a key is dead
+ * Determine whether a key is dead.
  */
 static bool key_is_dead(struct key *key, time_t limit)
 {
@@ -1063,7 +1120,12 @@
 }
 
 /*
- * Collect garbage from the contents of a keyring
+ * Collect garbage from the contents of a keyring, replacing the old list with
+ * a new one with the pointers all shuffled down.
+ *
+ * Dead keys are classed as ones that are flagged as being dead or are revoked,
+ * expired or negative keys that were revoked or expired before the specified
+ * limit.
  */
 void keyring_gc(struct key *keyring, time_t limit)
 {
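
The keyring.c changes above carry the KEY_LINK_FIXQUOTA flag in the otherwise-unused low bit of the preallocated keyring_list pointer rather than adding a second out-parameter. A minimal sketch of that encode/decode pattern, assuming only that the pointed-to object is at least 2-byte aligned (the names below are illustrative, not taken from the patch):

	#include <linux/types.h>

	#define EXAMPLE_FIXQUOTA	1UL	/* spare low bit of an aligned pointer */

	struct example_list {
		unsigned short nkeys;
	};

	/* Encode: fold the flag into the pointer value. */
	static unsigned long example_encode(struct example_list *list, bool fixquota)
	{
		return (unsigned long)list | (fixquota ? EXAMPLE_FIXQUOTA : 0);
	}

	/* Decode: mask the flag off again before dereferencing. */
	static struct example_list *example_decode(unsigned long packed, bool *fixquota)
	{
		*fixquota = (packed & EXAMPLE_FIXQUOTA) != 0;
		return (struct example_list *)(packed & ~EXAMPLE_FIXQUOTA);
	}
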
diff --git a/security/keys/permission.c b/security/keys/permission.c
index 2864550..c35b522 100644
--- a/security/keys/permission.c
+++ b/security/keys/permission.c
@@ -1,4 +1,4 @@
-/* permission.c: key permission determination
+/* Key permission checking
  *
  * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -13,18 +13,19 @@
 #include <linux/security.h>
 #include "internal.h"
 
-/*****************************************************************************/
 /**
  * key_task_permission - Check a key can be used
- * @key_ref: The key to check
- * @cred: The credentials to use
- * @perm: The permissions to check for
+ * @key_ref: The key to check.
+ * @cred: The credentials to use.
+ * @perm: The permissions to check for.
  *
  * Check to see whether permission is granted to use a key in the desired way,
  * but permit the security modules to override.
  *
- * The caller must hold either a ref on cred or must hold the RCU readlock or a
- * spinlock.
+ * The caller must hold either a ref on cred or must hold the RCU readlock.
+ *
+ * Returns 0 if successful, -EACCES if access is denied based on the
+ * permission bits or the LSM check.
  */
 int key_task_permission(const key_ref_t key_ref, const struct cred *cred,
 			key_perm_t perm)
@@ -79,14 +80,16 @@
 
 	/* let LSM be the final arbiter */
 	return security_key_permission(key_ref, cred, perm);
-
-} /* end key_task_permission() */
-
+}
 EXPORT_SYMBOL(key_task_permission);
 
-/*****************************************************************************/
-/*
- * validate a key
+/**
+ * key_validate - Validate a key.
+ * @key: The key to be validated.
+ *
+ * Check that a key is valid, returning 0 if the key is okay, -EKEYREVOKED if
+ * the key's type has been removed or if the key has been revoked or
+ * -EKEYEXPIRED if the key has expired.
  */
 int key_validate(struct key *key)
 {
@@ -111,7 +114,5 @@
 
 error:
 	return ret;
-
-} /* end key_validate() */
-
+}
 EXPORT_SYMBOL(key_validate);
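
As a usage illustration of the two interfaces documented above - a hedged sketch only, not part of the patch; the helper name, headers and the KEY_READ permission constant are assumptions:

	#include <linux/key.h>
	#include <linux/cred.h>

	/* Check that the current credentials may read the key, then that the
	 * key itself is still live. */
	static int example_check_key(key_ref_t key_ref)
	{
		int ret;

		ret = key_task_permission(key_ref, current_cred(), KEY_READ);
		if (ret < 0)
			return ret;	/* -EACCES from the bits or the LSM */

		/* 0, -EKEYREVOKED or -EKEYEXPIRED */
		return key_validate(key_ref_to_ptr(key_ref));
	}
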
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 7037396..525cf8a 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -1,4 +1,4 @@
-/* proc.c: proc files for key database enumeration
+/* procfs files for key database enumeration
  *
  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -60,9 +60,8 @@
 	.release	= seq_release,
 };
 
-/*****************************************************************************/
 /*
- * declare the /proc files
+ * Declare the /proc files.
  */
 static int __init key_proc_init(void)
 {
@@ -79,14 +78,13 @@
 		panic("Cannot create /proc/key-users\n");
 
 	return 0;
-
-} /* end key_proc_init() */
+}
 
 __initcall(key_proc_init);
 
-/*****************************************************************************/
 /*
- * implement "/proc/keys" to provides a list of the keys on the system
+ * Implement "/proc/keys" to provide a list of the keys on the system that
+ * grant View permission to the caller.
  */
 #ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
 
@@ -293,9 +291,9 @@
 	return __key_user_next(n);
 }
 
-/*****************************************************************************/
 /*
- * implement "/proc/key-users" to provides a list of the key users
+ * Implement "/proc/key-users" to provides a list of the key users and their
+ * quotas.
  */
 static int proc_key_users_open(struct inode *inode, struct file *file)
 {
@@ -351,5 +349,4 @@
 		   maxbytes);
 
 	return 0;
-
 }
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 504bdd2..930634e 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -1,4 +1,4 @@
-/* Management of a process's keyrings
+/* Manage a process's keyrings
  *
  * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -21,13 +21,13 @@
 #include <asm/uaccess.h>
 #include "internal.h"
 
-/* session keyring create vs join semaphore */
+/* Session keyring create vs join semaphore */
 static DEFINE_MUTEX(key_session_mutex);
 
-/* user keyring creation semaphore */
+/* User keyring creation semaphore */
 static DEFINE_MUTEX(key_user_keyring_mutex);
 
-/* the root user's tracking struct */
+/* The root user's tracking struct */
 struct key_user root_key_user = {
 	.usage		= ATOMIC_INIT(3),
 	.cons_lock	= __MUTEX_INITIALIZER(root_key_user.cons_lock),
@@ -38,9 +38,8 @@
 	.user_ns	= &init_user_ns,
 };
 
-/*****************************************************************************/
 /*
- * install user and user session keyrings for a particular UID
+ * Install the user and user session keyrings for the current process's UID.
  */
 int install_user_keyrings(void)
 {
@@ -122,7 +121,8 @@
 }
 
 /*
- * install a fresh thread keyring directly to new credentials
+ * Install a fresh thread keyring directly to new credentials.  This keyring is
+ * allowed to overrun the quota.
  */
 int install_thread_keyring_to_cred(struct cred *new)
 {
@@ -138,7 +138,7 @@
 }
 
 /*
- * install a fresh thread keyring, discarding the old one
+ * Install a fresh thread keyring, discarding the old one.
  */
 static int install_thread_keyring(void)
 {
@@ -161,9 +161,10 @@
 }
 
 /*
- * install a process keyring directly to a credentials struct
- * - returns -EEXIST if there was already a process keyring, 0 if one installed,
- *   and other -ve on any other error
+ * Install a process keyring directly to a credentials struct.
+ *
+ * Returns -EEXIST if there was already a process keyring, 0 if one was
+ * installed, and another error code on any other error.
  */
 int install_process_keyring_to_cred(struct cred *new)
 {
@@ -192,8 +193,11 @@
 }
 
 /*
- * make sure a process keyring is installed
- * - we
+ * Make sure a process keyring is installed for the current process.  The
+ * existing process keyring is not replaced.
+ *
+ * Returns 0 if there is a process keyring by the end of this function, some
+ * error otherwise.
  */
 static int install_process_keyring(void)
 {
@@ -214,7 +218,7 @@
 }
 
 /*
- * install a session keyring directly to a credentials struct
+ * Install a session keyring directly to a credentials struct.
  */
 int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
 {
@@ -254,8 +258,8 @@
 }
 
 /*
- * install a session keyring, discarding the old one
- * - if a keyring is not supplied, an empty one is invented
+ * Install a session keyring, discarding the old one.  If a keyring is not
+ * supplied, an empty one is invented.
  */
 static int install_session_keyring(struct key *keyring)
 {
@@ -275,9 +279,8 @@
 	return commit_creds(new);
 }
 
-/*****************************************************************************/
 /*
- * the filesystem user ID changed
+ * Handle the fsuid changing.
  */
 void key_fsuid_changed(struct task_struct *tsk)
 {
@@ -288,12 +291,10 @@
 		tsk->cred->thread_keyring->uid = tsk->cred->fsuid;
 		up_write(&tsk->cred->thread_keyring->sem);
 	}
+}
 
-} /* end key_fsuid_changed() */
-
-/*****************************************************************************/
 /*
- * the filesystem group ID changed
+ * Handle the fsgid changing.
  */
 void key_fsgid_changed(struct task_struct *tsk)
 {
@@ -304,16 +305,28 @@
 		tsk->cred->thread_keyring->gid = tsk->cred->fsgid;
 		up_write(&tsk->cred->thread_keyring->sem);
 	}
+}
 
-} /* end key_fsgid_changed() */
-
-/*****************************************************************************/
 /*
- * search only my process keyrings for the first matching key
- * - we use the supplied match function to see if the description (or other
- *   feature of interest) matches
- * - we return -EAGAIN if we didn't find any matching key
- * - we return -ENOKEY if we found only negative matching keys
+ * Search the process keyrings attached to the supplied cred for the first
+ * matching key.
+ *
+ * The search criteria are the type and the match function.  The description is
+ * given to the match function as a parameter, but doesn't otherwise influence
+ * the search.  Typically the match function will compare the description
+ * parameter to the key's description.
+ *
+ * This can only search keyrings that grant Search permission to the supplied
+ * credentials.  Keyrings linked to searched keyrings will also be searched if
+ * they grant Search permission too.  Keys can only be found if they grant
+ * Search permission to the credentials.
+ *
+ * Returns a pointer to the key with the key usage count incremented if
+ * successful, -EAGAIN if we didn't find any matching key or -ENOKEY if we only
+ * matched negative keys.
+ *
+ * In the case of a successful return, the possession attribute is set on the
+ * returned key reference.
  */
 key_ref_t search_my_process_keyrings(struct key_type *type,
 				     const void *description,
@@ -428,13 +441,13 @@
 	return key_ref;
 }
 
-/*****************************************************************************/
 /*
- * search the process keyrings for the first matching key
- * - we use the supplied match function to see if the description (or other
- *   feature of interest) matches
- * - we return -EAGAIN if we didn't find any matching key
- * - we return -ENOKEY if we found only negative matching keys
+ * Search the process keyrings attached to the supplied cred for the first
+ * matching key in the manner of search_my_process_keyrings(), but also search
+ * the keys attached to the assumed authorisation key using its credentials if
+ * one is available.
+ *
+ * Return same as search_my_process_keyrings().
  */
 key_ref_t search_process_keyrings(struct key_type *type,
 				  const void *description,
@@ -489,24 +502,33 @@
 
 found:
 	return key_ref;
+}
 
-} /* end search_process_keyrings() */
-
-/*****************************************************************************/
 /*
- * see if the key we're looking at is the target key
+ * See if the key we're looking at is the target key.
  */
 int lookup_user_key_possessed(const struct key *key, const void *target)
 {
 	return key == target;
+}
 
-} /* end lookup_user_key_possessed() */
-
-/*****************************************************************************/
 /*
- * lookup a key given a key ID from userspace with a given permissions mask
- * - don't create special keyrings unless so requested
- * - partially constructed keys aren't found unless requested
+ * Look up a key ID given us by userspace with a given permissions mask to get
+ * the key it refers to.
+ *
+ * Flags can be passed to request that special keyrings be created if referred
+ * to directly, to permit partially constructed keys to be found and to skip
+ * validity and permission checks on the found key.
+ *
+ * Returns a pointer to the key with an incremented usage count if successful;
+ * -EINVAL if the key ID is invalid; -ENOKEY if the key ID does not correspond
+ * to a key or the best found key was a negative key; -EKEYREVOKED or
+ * -EKEYEXPIRED if the best found key was revoked or expired; -EACCES if the
+ * found key doesn't grant the requested permission or the LSM denied access
+ * to it; or -ENOMEM if a special keyring couldn't be created.
+ *
+ * In the case of a successful return, the possession attribute is set on the
+ * returned key reference.
  */
 key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
 			  key_perm_t perm)
@@ -711,15 +733,18 @@
 reget_creds:
 	put_cred(cred);
 	goto try_again;
+}
 
-} /* end lookup_user_key() */
-
-/*****************************************************************************/
 /*
- * join the named keyring as the session keyring if possible, or attempt to
- * create a new one of that name if not
- * - if the name is NULL, an empty anonymous keyring is installed instead
- * - named session keyring joining is done with a semaphore held
+ * Join the named keyring as the session keyring if possible else attempt to
+ * create a new one of that name and join that.
+ *
+ * If the name is NULL, an empty anonymous keyring will be installed as the
+ * session keyring.
+ *
+ * Named session keyrings are joined with a semaphore held to prevent the
+ * keyrings from going away whilst the attempt is made to join them and also
+ * to prevent a race in creating compatible session keyrings.
  */
 long join_session_keyring(const char *name)
 {
@@ -791,8 +816,8 @@
 }
 
 /*
- * Replace a process's session keyring when that process resumes userspace on
- * behalf of one of its children
+ * Replace a process's session keyring on behalf of one of its children when
+ * the target process is about to resume userspace execution.
  */
 void key_replace_session_keyring(void)
 {
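
To show how the rewritten lookup_user_key() comment plays out for an in-kernel caller, a hedged sketch in the style of keyctl; the flag and permission constants, headers and error-handling shape are assumptions, not from the patch:

	#include <linux/key.h>
	#include <linux/err.h>

	static long example_use_user_key(key_serial_t id)
	{
		key_ref_t key_ref;

		/* Resolve the userspace-supplied ID, creating a special keyring
		 * if the ID names one that doesn't exist yet. */
		key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_READ);
		if (IS_ERR(key_ref))
			return PTR_ERR(key_ref);	/* -EINVAL, -ENOKEY, -EACCES, ... */

		/* ... use the key; its usage count was incremented for us ... */

		key_ref_put(key_ref);
		return 0;
	}
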
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 0ea52d2..a3dc0d4 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -39,8 +39,14 @@
 	return signal_pending(current) ? -ERESTARTSYS : 0;
 }
 
-/*
- * call to complete the construction of a key
+/**
+ * complete_request_key - Complete the construction of a key.
+ * @cons: The key construction record.
+ * @error: The success or failure of the construction.
+ *
+ * Complete the attempt to construct a key.  The key will be negated
+ * if an error is indicated.  The authorisation key will be revoked
+ * unconditionally.
  */
 void complete_request_key(struct key_construction *cons, int error)
 {
@@ -58,23 +64,33 @@
 }
 EXPORT_SYMBOL(complete_request_key);
 
+/*
+ * Initialise a usermode helper that is going to have a specific session
+ * keyring.
+ *
+ * This is called in the context of a freshly forked kthread before
+ * kernel_execve(), so we can simply install the desired session_keyring at
+ * this point.
+ */
 static int umh_keys_init(struct subprocess_info *info)
 {
 	struct cred *cred = (struct cred*)current_cred();
 	struct key *keyring = info->data;
-	/*
-	 * This is called in context of freshly forked kthread before
-	 * kernel_execve(), we can just change our ->session_keyring.
-	 */
+
 	return install_session_keyring_to_cred(cred, keyring);
 }
 
+/*
+ * Clean up a usermode helper with session keyring.
+ */
 static void umh_keys_cleanup(struct subprocess_info *info)
 {
 	struct key *keyring = info->data;
 	key_put(keyring);
 }
 
+/*
+ * Call a usermode helper with a specific session keyring.
+ */
 static int call_usermodehelper_keys(char *path, char **argv, char **envp,
 			 struct key *session_keyring, enum umh_wait wait)
 {
@@ -91,7 +107,7 @@
 }
 
 /*
- * request userspace finish the construction of a key
+ * Request userspace finish the construction of a key
  * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
  */
 static int call_sbin_request_key(struct key_construction *cons,
@@ -198,8 +214,9 @@
 }
 
 /*
- * call out to userspace for key construction
- * - we ignore program failure and go on key status instead
+ * Call out to userspace for key construction.
+ *
+ * Program failure is ignored in favour of key status.
  */
 static int construct_key(struct key *key, const void *callout_info,
 			 size_t callout_len, void *aux,
@@ -246,9 +263,10 @@
 }
 
 /*
- * get the appropriate destination keyring for the request
- * - we return whatever keyring we select with an extra reference upon it which
- *   the caller must release
+ * Get the appropriate destination keyring for the request.
+ *
+ * The keyring selected is returned with an extra reference upon it which the
+ * caller must release.
  */
 static void construct_get_dest_keyring(struct key **_dest_keyring)
 {
@@ -321,9 +339,11 @@
 }
 
 /*
- * allocate a new key in under-construction state and attempt to link it in to
- * the requested place
- * - may return a key that's already under construction instead
+ * Allocate a new key in under-construction state and attempt to link it in to
+ * the requested keyring.
+ *
+ * May return a key that's already under construction instead if there was a
+ * race between two threads calling request_key().
  */
 static int construct_alloc_key(struct key_type *type,
 			       const char *description,
@@ -332,8 +352,8 @@
 			       struct key_user *user,
 			       struct key **_key)
 {
-	struct keyring_list *prealloc;
 	const struct cred *cred = current_cred();
+	unsigned long prealloc;
 	struct key *key;
 	key_ref_t key_ref;
 	int ret;
@@ -414,7 +434,7 @@
 }
 
 /*
- * commence key construction
+ * Commence key construction.
  */
 static struct key *construct_key_and_link(struct key_type *type,
 					  const char *description,
@@ -465,12 +485,32 @@
 	return ERR_PTR(ret);
 }
 
-/*
- * request a key
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
- * - cache the key in an appropriate keyring
+/**
+ * request_key_and_link - Request a key and cache it in a keyring.
+ * @type: The type of key we want.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ * @dest_keyring: Where to cache the key.
+ * @flags: Flags to key_alloc().
+ *
+ * A key matching the specified criteria is searched for in the process's
+ * keyrings and returned with its usage count incremented if found.  Otherwise,
+ * if callout_info is not NULL, a key will be allocated and some service
+ * (probably in userspace) will be asked to instantiate it.
+ *
+ * If successfully found or created, the key will be linked to the destination
+ * keyring if one is provided.
+ *
+ * Returns a pointer to the key if successful; -EACCES, -ENOKEY, -EKEYREVOKED
+ * or -EKEYEXPIRED if an inaccessible, negative, revoked or expired key was
+ * found; -ENOKEY if no key was found and no @callout_info was given; -EDQUOT
+ * if insufficient key quota was available to create a new key; or -ENOMEM if
+ * insufficient memory was available.
+ *
+ * If the returned key was created, then it may still be under construction,
+ * and wait_for_key_construction() should be used to wait for that to complete.
  */
 struct key *request_key_and_link(struct key_type *type,
 				 const char *description,
@@ -524,8 +564,16 @@
 	return key;
 }
 
-/*
- * wait for construction of a key to complete
+/**
+ * wait_for_key_construction - Wait for construction of a key to complete
+ * @key: The key being waited for.
+ * @intr: Whether to wait interruptibly.
+ *
+ * Wait for a key to finish being constructed.
+ *
+ * Returns 0 if successful; -ERESTARTSYS if the wait was interrupted; -ENOKEY
+ * if the key was negated; or -EKEYREVOKED or -EKEYEXPIRED if the key was
+ * revoked or expired.
  */
 int wait_for_key_construction(struct key *key, bool intr)
 {
@@ -542,12 +590,19 @@
 }
 EXPORT_SYMBOL(wait_for_key_construction);
 
-/*
- * request a key
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
- * - waits uninterruptible for creation to complete
+/**
+ * request_key - Request a key and wait for construction
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found, new keys are always allocated in the user's quota,
+ * the callout_info must be a NUL-terminated string and no auxiliary data can
+ * be passed.
+ *
+ * Furthermore, it then works as wait_for_key_construction() to wait for the
+ * completion of keys undergoing construction with a non-interruptible wait.
  */
 struct key *request_key(struct key_type *type,
 			const char *description,
@@ -572,12 +627,19 @@
 }
 EXPORT_SYMBOL(request_key);
 
-/*
- * request a key with auxiliary data for the upcaller
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
- * - waits uninterruptible for creation to complete
+/**
+ * request_key_with_auxdata - Request a key with auxiliary data for the upcaller
+ * @type: The type of key we want.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found and new keys are always allocated in the user's quota.
+ *
+ * Furthermore, it then works as wait_for_key_construction() to wait for the
+ * completion of keys undergoing construction with a non-interruptible wait.
  */
 struct key *request_key_with_auxdata(struct key_type *type,
 				     const char *description,
@@ -602,10 +664,18 @@
 EXPORT_SYMBOL(request_key_with_auxdata);
 
 /*
- * request a key (allow async construction)
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
+ * request_key_async - Request a key (allow async construction)
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found, new keys are always allocated in the user's quota and
+ * no auxiliary data can be passed.
+ *
+ * The caller should call wait_for_key_construction() to wait for the
+ * completion of the returned key if it is still undergoing construction.
  */
 struct key *request_key_async(struct key_type *type,
 			      const char *description,
@@ -620,9 +690,17 @@
 
 /*
  * request a key with auxiliary data for the upcaller (allow async construction)
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found and new keys are always allocated in the user's quota.
+ *
+ * The caller should call wait_for_key_construction() to wait for the
+ * completion of the returned key if it is still undergoing construction.
  */
 struct key *request_key_async_with_auxdata(struct key_type *type,
 					   const char *description,
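
A hedged end-to-end sketch of the request_key() family documented above, using the exported "user" key type; the description, callout string and helper name are purely illustrative, not part of the patch:

	#include <linux/key.h>
	#include <linux/err.h>
	#include <keys/user-type.h>

	static struct key *example_get_key(void)
	{
		struct key *key;
		int ret;

		/* Search the process keyrings, upcalling to userspace if needed. */
		key = request_key(&key_type_user, "example:token", "callout data");
		if (IS_ERR(key))
			return key;	/* -ENOKEY, -EACCES, -EDQUOT, ... */

		/* request_key() already waited; an async caller would do this: */
		ret = wait_for_key_construction(key, false);
		if (ret < 0) {
			key_put(key);
			return ERR_PTR(ret);
		}
		return key;
	}
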
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 8674715..6816403 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -1,4 +1,4 @@
-/* request_key_auth.c: request key authorisation controlling key def
+/* Request key authorisation token key definition.
  *
  * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -26,7 +26,7 @@
 static long request_key_auth_read(const struct key *, char __user *, size_t);
 
 /*
- * the request-key authorisation key type definition
+ * The request-key authorisation key type definition.
  */
 struct key_type key_type_request_key_auth = {
 	.name		= ".request_key_auth",
@@ -38,9 +38,8 @@
 	.read		= request_key_auth_read,
 };
 
-/*****************************************************************************/
 /*
- * instantiate a request-key authorisation key
+ * Instantiate a request-key authorisation key.
  */
 static int request_key_auth_instantiate(struct key *key,
 					const void *data,
@@ -48,12 +47,10 @@
 {
 	key->payload.data = (struct request_key_auth *) data;
 	return 0;
+}
 
-} /* end request_key_auth_instantiate() */
-
-/*****************************************************************************/
 /*
- * reading a request-key authorisation key retrieves the callout information
+ * Describe an authorisation token.
  */
 static void request_key_auth_describe(const struct key *key,
 				      struct seq_file *m)
@@ -63,12 +60,10 @@
 	seq_puts(m, "key:");
 	seq_puts(m, key->description);
 	seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
+}
 
-} /* end request_key_auth_describe() */
-
-/*****************************************************************************/
 /*
- * read the callout_info data
+ * Read the callout_info data (retrieves the callout information).
  * - the key's semaphore is read-locked
  */
 static long request_key_auth_read(const struct key *key,
@@ -91,13 +86,12 @@
 	}
 
 	return ret;
+}
 
-} /* end request_key_auth_read() */
-
-/*****************************************************************************/
 /*
- * handle revocation of an authorisation token key
- * - called with the key sem write-locked
+ * Handle revocation of an authorisation token key.
+ *
+ * Called with the key sem write-locked.
  */
 static void request_key_auth_revoke(struct key *key)
 {
@@ -109,12 +103,10 @@
 		put_cred(rka->cred);
 		rka->cred = NULL;
 	}
+}
 
-} /* end request_key_auth_revoke() */
-
-/*****************************************************************************/
 /*
- * destroy an instantiation authorisation token key
+ * Destroy an instantiation authorisation token key.
  */
 static void request_key_auth_destroy(struct key *key)
 {
@@ -131,13 +123,11 @@
 	key_put(rka->dest_keyring);
 	kfree(rka->callout_info);
 	kfree(rka);
+}
 
-} /* end request_key_auth_destroy() */
-
-/*****************************************************************************/
 /*
- * create an authorisation token for /sbin/request-key or whoever to gain
- * access to the caller's security data
+ * Create an authorisation token for /sbin/request-key or whoever to gain
+ * access to the caller's security data.
  */
 struct key *request_key_auth_new(struct key *target, const void *callout_info,
 				 size_t callout_len, struct key *dest_keyring)
@@ -228,12 +218,10 @@
 	kfree(rka);
 	kleave("= %d", ret);
 	return ERR_PTR(ret);
+}
 
-} /* end request_key_auth_new() */
-
-/*****************************************************************************/
 /*
- * see if an authorisation key is associated with a particular key
+ * See if an authorisation key is associated with a particular key.
  */
 static int key_get_instantiation_authkey_match(const struct key *key,
 					       const void *_id)
@@ -242,16 +230,11 @@
 	key_serial_t id = (key_serial_t)(unsigned long) _id;
 
 	return rka->target_key->serial == id;
+}
 
-} /* end key_get_instantiation_authkey_match() */
-
-/*****************************************************************************/
 /*
- * get the authorisation key for instantiation of a specific key if attached to
- * the current process's keyrings
- * - this key is inserted into a keyring and that is set as /sbin/request-key's
- *   session keyring
- * - a target_id of zero specifies any valid token
+ * Search the current process's keyrings for the authorisation key for
+ * instantiation of a key.
  */
 struct key *key_get_instantiation_authkey(key_serial_t target_id)
 {
@@ -278,5 +261,4 @@
 
 error:
 	return authkey;
-
-} /* end key_get_instantiation_authkey() */
+}
diff --git a/security/keys/trusted_defined.c b/security/keys/trusted.c
similarity index 97%
rename from security/keys/trusted_defined.c
rename to security/keys/trusted.c
index 975e9f2..83fc92e 100644
--- a/security/keys/trusted_defined.c
+++ b/security/keys/trusted.c
@@ -29,7 +29,7 @@
 #include <linux/tpm.h>
 #include <linux/tpm_command.h>
 
-#include "trusted_defined.h"
+#include "trusted.h"
 
 static const char hmac_alg[] = "hmac(sha1)";
 static const char hash_alg[] = "sha1";
@@ -101,11 +101,13 @@
 		if (dlen == 0)
 			break;
 		data = va_arg(argp, unsigned char *);
-		if (data == NULL)
-			return -EINVAL;
+		if (data == NULL) {
+			ret = -EINVAL;
+			break;
+		}
 		ret = crypto_shash_update(&sdesc->shash, data, dlen);
 		if (ret < 0)
-			goto out;
+			break;
 	}
 	va_end(argp);
 	if (!ret)
@@ -146,14 +148,17 @@
 		if (dlen == 0)
 			break;
 		data = va_arg(argp, unsigned char *);
-		ret = crypto_shash_update(&sdesc->shash, data, dlen);
-		if (ret < 0) {
-			va_end(argp);
-			goto out;
+		if (!data) {
+			ret = -EINVAL;
+			break;
 		}
+		ret = crypto_shash_update(&sdesc->shash, data, dlen);
+		if (ret < 0)
+			break;
 	}
 	va_end(argp);
-	ret = crypto_shash_final(&sdesc->shash, paramdigest);
+	if (!ret)
+		ret = crypto_shash_final(&sdesc->shash, paramdigest);
 	if (!ret)
 		ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE,
 				  paramdigest, TPM_NONCE_SIZE, h1,
@@ -222,13 +227,12 @@
 			break;
 		dpos = va_arg(argp, unsigned int);
 		ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
-		if (ret < 0) {
-			va_end(argp);
-			goto out;
-		}
+		if (ret < 0)
+			break;
 	}
 	va_end(argp);
-	ret = crypto_shash_final(&sdesc->shash, paramdigest);
+	if (!ret)
+		ret = crypto_shash_final(&sdesc->shash, paramdigest);
 	if (ret < 0)
 		goto out;
 
@@ -316,13 +320,12 @@
 			break;
 		dpos = va_arg(argp, unsigned int);
 		ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
-		if (ret < 0) {
-			va_end(argp);
-			goto out;
-		}
+		if (ret < 0)
+			break;
 	}
 	va_end(argp);
-	ret = crypto_shash_final(&sdesc->shash, paramdigest);
+	if (!ret)
+		ret = crypto_shash_final(&sdesc->shash, paramdigest);
 	if (ret < 0)
 		goto out;
 
@@ -511,7 +514,7 @@
 	/* get session for sealing key */
 	ret = osap(tb, &sess, keyauth, keytype, keyhandle);
 	if (ret < 0)
-		return ret;
+		goto out;
 	dump_sess(&sess);
 
 	/* calculate encrypted authorization value */
@@ -519,11 +522,11 @@
 	memcpy(td->xorwork + SHA1_DIGEST_SIZE, sess.enonce, SHA1_DIGEST_SIZE);
 	ret = TSS_sha1(td->xorwork, SHA1_DIGEST_SIZE * 2, td->xorhash);
 	if (ret < 0)
-		return ret;
+		goto out;
 
 	ret = tpm_get_random(tb, td->nonceodd, TPM_NONCE_SIZE);
 	if (ret < 0)
-		return ret;
+		goto out;
 	ordinal = htonl(TPM_ORD_SEAL);
 	datsize = htonl(datalen);
 	pcrsize = htonl(pcrinfosize);
@@ -552,7 +555,7 @@
 				   &datsize, datalen, data, 0, 0);
 	}
 	if (ret < 0)
-		return ret;
+		goto out;
 
 	/* build and send the TPM request packet */
 	INIT_BUF(tb);
@@ -572,7 +575,7 @@
 
 	ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
 	if (ret < 0)
-		return ret;
+		goto out;
 
 	/* calculate the size of the returned Blob */
 	sealinfosize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t));
@@ -591,6 +594,8 @@
 		memcpy(blob, tb->data + TPM_DATA_OFFSET, storedsize);
 		*bloblen = storedsize;
 	}
+out:
+	kfree(td);
 	return ret;
 }
 
@@ -1027,6 +1032,7 @@
 	ret = datablob_parse(datablob, new_p, new_o);
 	if (ret != Opt_update) {
 		ret = -EINVAL;
+		kfree(new_p);
 		goto out;
 	}
 	/* copy old key values, and reseal with new pcrs */
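
The trusted.c hunks above all converge on the same error-handling shape: break out of the va_arg loop rather than returning early, so that va_end() always runs and the final hash step is only attempted on success. A hedged, stand-alone sketch of that pattern (not the patch's own helpers; headers and names are assumptions):

	#include <stdarg.h>
	#include <linux/errno.h>
	#include <crypto/hash.h>

	static int example_hash_varargs(struct shash_desc *desc, u8 *digest, ...)
	{
		va_list argp;
		unsigned int dlen;
		unsigned char *data;
		int ret = 0;

		va_start(argp, digest);
		for (;;) {
			dlen = va_arg(argp, unsigned int);
			if (dlen == 0)
				break;
			data = va_arg(argp, unsigned char *);
			if (data == NULL) {
				ret = -EINVAL;
				break;	/* don't return: va_end() must still run */
			}
			ret = crypto_shash_update(desc, data, dlen);
			if (ret < 0)
				break;
		}
		va_end(argp);
		if (!ret)
			ret = crypto_shash_final(desc, digest);
		return ret;
	}
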
diff --git a/security/keys/trusted_defined.h b/security/keys/trusted.h
similarity index 100%
rename from security/keys/trusted_defined.h
rename to security/keys/trusted.h
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index e9aa079..02807fb 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -35,7 +35,6 @@
 
 EXPORT_SYMBOL_GPL(key_type_user);
 
-/*****************************************************************************/
 /*
  * instantiate a user defined key
  */
@@ -65,12 +64,10 @@
 
 error:
 	return ret;
-
-} /* end user_instantiate() */
+}
 
 EXPORT_SYMBOL_GPL(user_instantiate);
 
-/*****************************************************************************/
 /*
  * dispose of the old data from an updated user defined key
  */
@@ -81,10 +78,8 @@
 	upayload = container_of(rcu, struct user_key_payload, rcu);
 
 	kfree(upayload);
+}
 
-} /* end user_update_rcu_disposal() */
-
-/*****************************************************************************/
 /*
  * update a user defined key
  * - the key's semaphore is write-locked
@@ -123,24 +118,20 @@
 
 error:
 	return ret;
-
-} /* end user_update() */
+}
 
 EXPORT_SYMBOL_GPL(user_update);
 
-/*****************************************************************************/
 /*
  * match users on their name
  */
 int user_match(const struct key *key, const void *description)
 {
 	return strcmp(key->description, description) == 0;
-
-} /* end user_match() */
+}
 
 EXPORT_SYMBOL_GPL(user_match);
 
-/*****************************************************************************/
 /*
  * dispose of the links from a revoked keyring
  * - called with the key sem write-locked
@@ -156,12 +147,10 @@
 		rcu_assign_pointer(key->payload.data, NULL);
 		call_rcu(&upayload->rcu, user_update_rcu_disposal);
 	}
-
-} /* end user_revoke() */
+}
 
 EXPORT_SYMBOL(user_revoke);
 
-/*****************************************************************************/
 /*
  * dispose of the data dangling from the corpse of a user key
  */
@@ -170,12 +159,10 @@
 	struct user_key_payload *upayload = key->payload.data;
 
 	kfree(upayload);
-
-} /* end user_destroy() */
+}
 
 EXPORT_SYMBOL_GPL(user_destroy);
 
-/*****************************************************************************/
 /*
  * describe the user key
  */
@@ -184,12 +171,10 @@
 	seq_puts(m, key->description);
 
 	seq_printf(m, ": %u", key->datalen);
-
-} /* end user_describe() */
+}
 
 EXPORT_SYMBOL_GPL(user_describe);
 
-/*****************************************************************************/
 /*
  * read the key data
  * - the key's semaphore is read-locked
@@ -213,7 +198,6 @@
 	}
 
 	return ret;
-
-} /* end user_read() */
+}
 
 EXPORT_SYMBOL_GPL(user_read);
diff --git a/security/security.c b/security/security.c
index 739e403..7b7308a 100644
--- a/security/security.c
+++ b/security/security.c
@@ -154,10 +154,9 @@
 				    effective, inheritable, permitted);
 }
 
-int security_capable(int cap)
+int security_capable(const struct cred *cred, int cap)
 {
-	return security_ops->capable(current, current_cred(), cap,
-				     SECURITY_CAP_AUDIT);
+	return security_ops->capable(current, cred, cap, SECURITY_CAP_AUDIT);
 }
 
 int security_real_capable(struct task_struct *tsk, int cap)
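
The security_capable() change makes the credential set explicit at the call site; a hedged example of the adjusted calling convention (the capability and helper name are arbitrary, for illustration only):

	#include <linux/security.h>
	#include <linux/cred.h>
	#include <linux/capability.h>

	/* Returns true if the given credentials carry CAP_MAC_ADMIN. */
	static bool example_may_mac_admin(const struct cred *cred)
	{
		return security_capable(cred, CAP_MAC_ADMIN) == 0;
	}
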
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index e276eb4..c8d6992 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3198,7 +3198,11 @@
 {
 	struct task_security_struct *tsec = cred->security;
 
-	BUG_ON((unsigned long) cred->security < PAGE_SIZE);
+	/*
+	 * cred->security == NULL if security_cred_alloc_blank() or
+	 * security_prepare_creds() returned an error.
+	 */
+	BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE);
 	cred->security = (void *) 0x7UL;
 	kfree(tsec);
 }
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index c3f845c..a533732 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -178,7 +178,7 @@
 	p->bool_val_to_struct = (struct cond_bool_datum **)
 		kmalloc(p->p_bools.nprim * sizeof(struct cond_bool_datum *), GFP_KERNEL);
 	if (!p->bool_val_to_struct)
-		return -1;
+		return -ENOMEM;
 	return 0;
 }
 
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index be9de38..5736356 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -501,8 +501,8 @@
 	if (rc)
 		goto out;
 
-	rc = -ENOMEM;
-	if (cond_init_bool_indexes(p))
+	rc = cond_init_bool_indexes(p);
+	if (rc)
 		goto out;
 
 	for (i = 0; i < SYM_NUM; i++) {
diff --git a/sound/arm/aaci.c b/sound/arm/aaci.c
index 91acc9a..7c1fc64 100644
--- a/sound/arm/aaci.c
+++ b/sound/arm/aaci.c
@@ -30,6 +30,8 @@
 
 #define DRIVER_NAME	"aaci-pl041"
 
+#define FRAME_PERIOD_US	21
+
 /*
  * PM support is not complete.  Turn it off.
  */
@@ -48,7 +50,11 @@
 	if (v & SLFR_1RXV)
 		readl(aaci->base + AACI_SL1RX);
 
-	writel(maincr, aaci->base + AACI_MAINCR);
+	if (maincr != readl(aaci->base + AACI_MAINCR)) {
+		writel(maincr, aaci->base + AACI_MAINCR);
+		readl(aaci->base + AACI_MAINCR);
+		udelay(1);
+	}
 }
 
 /*
@@ -64,8 +70,8 @@
 			    unsigned short val)
 {
 	struct aaci *aaci = ac97->private_data;
+	int timeout;
 	u32 v;
-	int timeout = 5000;
 
 	if (ac97->num >= 4)
 		return;
@@ -81,14 +87,17 @@
 	writel(val << 4, aaci->base + AACI_SL2TX);
 	writel(reg << 12, aaci->base + AACI_SL1TX);
 
-	/*
-	 * Wait for the transmission of both slots to complete.
-	 */
+	/* Initially, wait one frame period */
+	udelay(FRAME_PERIOD_US);
+
+	/* And then wait an additional eight frame periods for it to be sent */
+	timeout = FRAME_PERIOD_US * 8;
 	do {
+		udelay(1);
 		v = readl(aaci->base + AACI_SLFR);
 	} while ((v & (SLFR_1TXB|SLFR_2TXB)) && --timeout);
 
-	if (!timeout)
+	if (v & (SLFR_1TXB|SLFR_2TXB))
 		dev_err(&aaci->dev->dev,
 			"timeout waiting for write to complete\n");
 
@@ -101,9 +110,8 @@
 static unsigned short aaci_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
 {
 	struct aaci *aaci = ac97->private_data;
+	int timeout, retries = 10;
 	u32 v;
-	int timeout = 5000;
-	int retries = 10;
 
 	if (ac97->num >= 4)
 		return ~0;
@@ -117,35 +125,34 @@
 	 */
 	writel((reg << 12) | (1 << 19), aaci->base + AACI_SL1TX);
 
-	/*
-	 * Wait for the transmission to complete.
-	 */
+	/* Initially, wait one frame period */
+	udelay(FRAME_PERIOD_US);
+
+	/* And then wait an additional eight frame periods for it to be sent */
+	timeout = FRAME_PERIOD_US * 8;
 	do {
+		udelay(1);
 		v = readl(aaci->base + AACI_SLFR);
 	} while ((v & SLFR_1TXB) && --timeout);
 
-	if (!timeout) {
+	if (v & SLFR_1TXB) {
 		dev_err(&aaci->dev->dev, "timeout on slot 1 TX busy\n");
 		v = ~0;
 		goto out;
 	}
 
-	/*
-	 * Give the AC'97 codec more than enough time
-	 * to respond. (42us = ~2 frames at 48kHz.)
-	 */
-	udelay(42);
+	/* Now wait for the response frame */
+	udelay(FRAME_PERIOD_US);
 
-	/*
-	 * Wait for slot 2 to indicate data.
-	 */
-	timeout = 5000;
+	/* And then wait an additional eight frame periods for data */
+	timeout = FRAME_PERIOD_US * 8;
 	do {
+		udelay(1);
 		cond_resched();
 		v = readl(aaci->base + AACI_SLFR) & (SLFR_1RXV|SLFR_2RXV);
 	} while ((v != (SLFR_1RXV|SLFR_2RXV)) && --timeout);
 
-	if (!timeout) {
+	if (v != (SLFR_1RXV|SLFR_2RXV)) {
 		dev_err(&aaci->dev->dev, "timeout on RX valid\n");
 		v = ~0;
 		goto out;
@@ -179,6 +186,7 @@
 	int timeout = 5000;
 
 	do {
+		udelay(1);
 		val = readl(aacirun->base + AACI_SR);
 	} while (val & mask && timeout--);
 }
@@ -874,7 +882,7 @@
 	 * Give the AC'97 codec more than enough time
 	 * to wake up. (42us = ~2 frames at 48kHz.)
 	 */
-	udelay(42);
+	udelay(FRAME_PERIOD_US * 2);
 
 	ret = snd_ac97_bus(aaci->card, 0, &aaci_bus_ops, aaci, &ac97_bus);
 	if (ret)
@@ -989,6 +997,8 @@
 	 * disabling the channel doesn't clear the FIFO.
 	 */
 	writel(aaci->maincr & ~MAINCR_IE, aaci->base + AACI_MAINCR);
+	readl(aaci->base + AACI_MAINCR);
+	udelay(1);
 	writel(aaci->maincr, aaci->base + AACI_MAINCR);
 
 	/*
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index 10c3a87..b310702 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -33,9 +33,12 @@
 #include <linux/dw_dmac.h>
 
 #include <mach/cpu.h>
-#include <mach/hardware.h>
 #include <mach/gpio.h>
 
+#ifdef CONFIG_ARCH_AT91
+#include <mach/hardware.h>
+#endif
+
 #include "ac97c.h"
 
 enum {
diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
index 7730575..b8b31c4 100644
--- a/sound/core/hrtimer.c
+++ b/sound/core/hrtimer.c
@@ -45,12 +45,13 @@
 {
 	struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
 	struct snd_timer *t = stime->timer;
+	unsigned long oruns;
 
 	if (!atomic_read(&stime->running))
 		return HRTIMER_NORESTART;
 
-	hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
-	snd_timer_interrupt(stime->timer, t->sticks);
+	oruns = hrtimer_forward_now(hrt, ns_to_ktime(t->sticks * resolution));
+	snd_timer_interrupt(stime->timer, t->sticks * oruns);
 
 	if (!atomic_read(&stime->running))
 		return HRTIMER_NORESTART;
@@ -104,7 +105,7 @@
 }
 
 static struct snd_timer_hardware hrtimer_hw = {
-	.flags =	SNDRV_TIMER_HW_AUTO,
+	.flags =	SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_TASKLET,
 	.open =		snd_hrtimer_open,
 	.close =	snd_hrtimer_close,
 	.start =	snd_hrtimer_start,
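
For context on the hrtimer.c change: hrtimer_forward_now() returns how many whole periods the timer overran, which the hunk above multiplies into the tick count passed to snd_timer_interrupt(). A hedged, self-contained callback sketch (the period and names are illustrative, not from the patch):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	#define EXAMPLE_PERIOD_NS	1000000		/* 1 ms, for illustration */

	static enum hrtimer_restart example_timer_fn(struct hrtimer *hrt)
	{
		unsigned long overruns;

		/* Re-arm for the next period and learn how many were missed. */
		overruns = hrtimer_forward_now(hrt, ns_to_ktime(EXAMPLE_PERIOD_NS));

		/* Account for 'overruns' elapsed periods here, as the hunk does
		 * with t->sticks * oruns, instead of assuming exactly one. */

		return HRTIMER_RESTART;
	}
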
diff --git a/sound/drivers/mtpav.c b/sound/drivers/mtpav.c
index da03597..5c426df 100644
--- a/sound/drivers/mtpav.c
+++ b/sound/drivers/mtpav.c
@@ -55,14 +55,13 @@
 #include <linux/err.h>
 #include <linux/platform_device.h>
 #include <linux/ioport.h>
+#include <linux/io.h>
 #include <linux/moduleparam.h>
 #include <sound/core.h>
 #include <sound/initval.h>
 #include <sound/rawmidi.h>
 #include <linux/delay.h>
 
-#include <asm/io.h>
-
 /*
  *      globals
  */
diff --git a/sound/oss/Makefile b/sound/oss/Makefile
index 96f14dc..90ffb99 100644
--- a/sound/oss/Makefile
+++ b/sound/oss/Makefile
@@ -87,7 +87,7 @@
 	$(obj)/bin2hex pss_synth < $< > $@
 else
     $(obj)/pss_boot.h:
-	(							\
+	$(Q)(							\
 	    echo 'static unsigned char * pss_synth = NULL;';	\
 	    echo 'static int pss_synthLen = 0;';		\
 	) > $@
@@ -102,7 +102,7 @@
 	$(obj)/hex2hex -i trix_boot < $< > $@
 else
     $(obj)/trix_boot.h:
-	(							\
+	$(Q)(							\
 	    echo 'static unsigned char * trix_boot = NULL;';	\
 	    echo 'static int trix_boot_len = 0;';		\
 	) > $@
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index 6117595..573594b 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -979,31 +979,25 @@
 
 	snd_azf3328_dbgcallenter();
 	switch (bitrate) {
-#define AZF_FMT_XLATE(in_freq, out_bits) \
-	do { \
-		case AZF_FREQ_ ## in_freq: \
-			freq = SOUNDFORMAT_FREQ_ ## out_bits; \
-			break; \
-	} while (0);
-	AZF_FMT_XLATE(4000, SUSPECTED_4000)
-	AZF_FMT_XLATE(4800, SUSPECTED_4800)
-	/* the AZF3328 names it "5510" for some strange reason: */
-	AZF_FMT_XLATE(5512, 5510)
-	AZF_FMT_XLATE(6620, 6620)
-	AZF_FMT_XLATE(8000, 8000)
-	AZF_FMT_XLATE(9600, 9600)
-	AZF_FMT_XLATE(11025, 11025)
-	AZF_FMT_XLATE(13240, SUSPECTED_13240)
-	AZF_FMT_XLATE(16000, 16000)
-	AZF_FMT_XLATE(22050, 22050)
-	AZF_FMT_XLATE(32000, 32000)
+	case AZF_FREQ_4000:  freq = SOUNDFORMAT_FREQ_SUSPECTED_4000; break;
+	case AZF_FREQ_4800:  freq = SOUNDFORMAT_FREQ_SUSPECTED_4800; break;
+	case AZF_FREQ_5512:
+		/* the AZF3328 names it "5510" for some strange reason */
+			     freq = SOUNDFORMAT_FREQ_5510; break;
+	case AZF_FREQ_6620:  freq = SOUNDFORMAT_FREQ_6620; break;
+	case AZF_FREQ_8000:  freq = SOUNDFORMAT_FREQ_8000; break;
+	case AZF_FREQ_9600:  freq = SOUNDFORMAT_FREQ_9600; break;
+	case AZF_FREQ_11025: freq = SOUNDFORMAT_FREQ_11025; break;
+	case AZF_FREQ_13240: freq = SOUNDFORMAT_FREQ_SUSPECTED_13240; break;
+	case AZF_FREQ_16000: freq = SOUNDFORMAT_FREQ_16000; break;
+	case AZF_FREQ_22050: freq = SOUNDFORMAT_FREQ_22050; break;
+	case AZF_FREQ_32000: freq = SOUNDFORMAT_FREQ_32000; break;
 	default:
 		snd_printk(KERN_WARNING "unknown bitrate %d, assuming 44.1kHz!\n", bitrate);
 		/* fall-through */
-	AZF_FMT_XLATE(44100, 44100)
-	AZF_FMT_XLATE(48000, 48000)
-	AZF_FMT_XLATE(66200, SUSPECTED_66200)
-#undef AZF_FMT_XLATE
+	case AZF_FREQ_44100: freq = SOUNDFORMAT_FREQ_44100; break;
+	case AZF_FREQ_48000: freq = SOUNDFORMAT_FREQ_48000; break;
+	case AZF_FREQ_66200: freq = SOUNDFORMAT_FREQ_SUSPECTED_66200; break;
 	}
 	/* val = 0xff07; 3m27.993s (65301Hz; -> 64000Hz???) hmm, 66120, 65967, 66123 */
 	/* val = 0xff09; 17m15.098s (13123,478Hz; -> 12000Hz???) hmm, 13237.2Hz? */
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index 4a66347..74b0560 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -381,7 +381,7 @@
 	snd_print_pcm_rates(a->rates, buf, sizeof(buf));
 
 	if (a->format == AUDIO_CODING_TYPE_LPCM)
-		snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2 - 8));
+		snd_print_pcm_bits(a->sample_bits, buf2 + 8, sizeof(buf2) - 8);
 	else if (a->max_bitrate)
 		snprintf(buf2, sizeof(buf2),
 				", max bitrate = %d", a->max_bitrate);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 2e91a99..0baffcd 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2703,7 +2703,7 @@
 	if (err < 0)
 		goto out_free;
 #ifdef CONFIG_SND_HDA_PATCH_LOADER
-	if (patch[dev]) {
+	if (patch[dev] && *patch[dev]) {
 		snd_printk(KERN_ERR SFX "Applying patch firmware '%s'\n",
 			   patch[dev]);
 		err = snd_hda_load_patch(chip->bus, patch[dev]);
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 9bb030a..fbe97d3 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -85,6 +85,7 @@
 	unsigned int auto_mic;
 	int auto_mic_ext;		/* autocfg.inputs[] index for ext mic */
 	unsigned int need_dac_fix;
+	hda_nid_t slave_dig_outs[2];
 
 	/* capture */
 	unsigned int num_adc_nids;
@@ -127,6 +128,7 @@
 	unsigned int ideapad:1;
 	unsigned int thinkpad:1;
 	unsigned int hp_laptop:1;
+	unsigned int asus:1;
 
 	unsigned int ext_mic_present;
 	unsigned int recording;
@@ -352,6 +354,8 @@
 			info->stream[SNDRV_PCM_STREAM_CAPTURE].nid =
 				spec->dig_in_nid;
 		}
+		if (spec->slave_dig_outs[0])
+			codec->slave_dig_outs = spec->slave_dig_outs;
 	}
 
 	return 0;
@@ -403,10 +407,16 @@
 	struct conexant_spec *spec;
 	struct conexant_jack *jack;
 	const char *name;
-	int err;
+	int i, err;
 
 	spec = codec->spec;
 	snd_array_init(&spec->jacks, sizeof(*jack), 32);
+
+	jack = spec->jacks.list;
+	for (i = 0; i < spec->jacks.used; i++, jack++)
+		if (jack->nid == nid)
+			return 0 ; /* already present */
+
 	jack = snd_array_new(&spec->jacks);
 	name = (type == SND_JACK_HEADPHONE) ? "Headphone" : "Mic" ;
 
@@ -2100,7 +2110,7 @@
 static hda_nid_t cxt5066_dac_nids[1] = { 0x10 };
 static hda_nid_t cxt5066_adc_nids[3] = { 0x14, 0x15, 0x16 };
 static hda_nid_t cxt5066_capsrc_nids[1] = { 0x17 };
-#define CXT5066_SPDIF_OUT	0x21
+static hda_nid_t cxt5066_digout_pin_nids[2] = { 0x20, 0x22 };
 
 /* OLPC's microphone port is DC coupled for use with external sensors,
  * therefore we use a 50% mic bias in order to center the input signal with
@@ -2312,6 +2322,19 @@
 	}
 }
 
+
+/* toggle input of built-in digital mic and mic jack appropriately */
+static void cxt5066_asus_automic(struct hda_codec *codec)
+{
+	unsigned int present;
+
+	present = snd_hda_jack_detect(codec, 0x1b);
+	snd_printdd("CXT5066: external microphone present=%d\n", present);
+	snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL,
+			    present ? 1 : 0);
+}
+
+
 /* toggle input of built-in digital mic and mic jack appropriately */
 static void cxt5066_hp_laptop_automic(struct hda_codec *codec)
 {
@@ -2387,6 +2410,23 @@
 	cxt5066_update_speaker(codec);
 }
 
+/* Dispatch the right mic autoswitch function */
+static void cxt5066_automic(struct hda_codec *codec)
+{
+	struct conexant_spec *spec = codec->spec;
+
+	if (spec->dell_vostro)
+		cxt5066_vostro_automic(codec);
+	else if (spec->ideapad)
+		cxt5066_ideapad_automic(codec);
+	else if (spec->thinkpad)
+		cxt5066_thinkpad_automic(codec);
+	else if (spec->hp_laptop)
+		cxt5066_hp_laptop_automic(codec);
+	else if (spec->asus)
+		cxt5066_asus_automic(codec);
+}
+
 /* unsolicited event for jack sensing */
 static void cxt5066_olpc_unsol_event(struct hda_codec *codec, unsigned int res)
 {
@@ -2405,60 +2445,19 @@
 }
 
 /* unsolicited event for jack sensing */
-static void cxt5066_vostro_event(struct hda_codec *codec, unsigned int res)
+static void cxt5066_unsol_event(struct hda_codec *codec, unsigned int res)
 {
-	snd_printdd("CXT5066_vostro: unsol event %x (%x)\n", res, res >> 26);
+	snd_printdd("CXT5066: unsol event %x (%x)\n", res, res >> 26);
 	switch (res >> 26) {
 	case CONEXANT_HP_EVENT:
 		cxt5066_hp_automute(codec);
 		break;
 	case CONEXANT_MIC_EVENT:
-		cxt5066_vostro_automic(codec);
+		cxt5066_automic(codec);
 		break;
 	}
 }
 
-/* unsolicited event for jack sensing */
-static void cxt5066_ideapad_event(struct hda_codec *codec, unsigned int res)
-{
-	snd_printdd("CXT5066_ideapad: unsol event %x (%x)\n", res, res >> 26);
-	switch (res >> 26) {
-	case CONEXANT_HP_EVENT:
-		cxt5066_hp_automute(codec);
-		break;
-	case CONEXANT_MIC_EVENT:
-		cxt5066_ideapad_automic(codec);
-		break;
-	}
-}
-
-/* unsolicited event for jack sensing */
-static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res)
-{
-	snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26);
-	switch (res >> 26) {
-	case CONEXANT_HP_EVENT:
-		cxt5066_hp_automute(codec);
-		break;
-	case CONEXANT_MIC_EVENT:
-		cxt5066_hp_laptop_automic(codec);
-		break;
-	}
-}
-
-/* unsolicited event for jack sensing */
-static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res)
-{
-	snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26);
-	switch (res >> 26) {
-	case CONEXANT_HP_EVENT:
-		cxt5066_hp_automute(codec);
-		break;
-	case CONEXANT_MIC_EVENT:
-		cxt5066_thinkpad_automic(codec);
-		break;
-	}
-}
 
 static const struct hda_input_mux cxt5066_analog_mic_boost = {
 	.num_items = 5,
@@ -2633,6 +2632,27 @@
 	spec->recording = 0;
 }
 
+static void conexant_check_dig_outs(struct hda_codec *codec,
+				    hda_nid_t *dig_pins,
+				    int num_pins)
+{
+	struct conexant_spec *spec = codec->spec;
+	hda_nid_t *nid_loc = &spec->multiout.dig_out_nid;
+	int i;
+
+	for (i = 0; i < num_pins; i++, dig_pins++) {
+		unsigned int cfg = snd_hda_codec_get_pincfg(codec, *dig_pins);
+		if (get_defcfg_connect(cfg) == AC_JACK_PORT_NONE)
+			continue;
+		if (snd_hda_get_connections(codec, *dig_pins, nid_loc, 1) != 1)
+			continue;
+		if (spec->slave_dig_outs[0])
+			nid_loc++;
+		else
+			nid_loc = spec->slave_dig_outs;
+	}
+}
+
 static struct hda_input_mux cxt5066_capture_source = {
 	.num_items = 4,
 	.items = {
@@ -3039,20 +3059,11 @@
 /* initialize jack-sensing, too */
 static int cxt5066_init(struct hda_codec *codec)
 {
-	struct conexant_spec *spec = codec->spec;
-
 	snd_printdd("CXT5066: init\n");
 	conexant_init(codec);
 	if (codec->patch_ops.unsol_event) {
 		cxt5066_hp_automute(codec);
-		if (spec->dell_vostro)
-			cxt5066_vostro_automic(codec);
-		else if (spec->ideapad)
-			cxt5066_ideapad_automic(codec);
-		else if (spec->thinkpad)
-			cxt5066_thinkpad_automic(codec);
-		else if (spec->hp_laptop)
-			cxt5066_hp_laptop_automic(codec);
+		cxt5066_automic(codec);
 	}
 	cxt5066_set_mic_boost(codec);
 	return 0;
@@ -3080,6 +3091,7 @@
 	CXT5066_DELL_VOSTRO,	/* Dell Vostro 1015i */
 	CXT5066_IDEAPAD,	/* Lenovo IdeaPad U150 */
 	CXT5066_THINKPAD,	/* Lenovo ThinkPad T410s, others? */
+	CXT5066_ASUS,		/* Asus K52JU, Lenovo G560 - Int mic at 0x1a and Ext mic at 0x1b */
 	CXT5066_HP_LAPTOP,      /* HP Laptop */
 	CXT5066_MODELS
 };
@@ -3091,6 +3103,7 @@
 	[CXT5066_DELL_VOSTRO]	= "dell-vostro",
 	[CXT5066_IDEAPAD]	= "ideapad",
 	[CXT5066_THINKPAD]	= "thinkpad",
+	[CXT5066_ASUS]		= "asus",
 	[CXT5066_HP_LAPTOP]	= "hp-laptop",
 };
 
@@ -3102,7 +3115,9 @@
 	SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
 	SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
-	SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_HP_LAPTOP),
+	SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS),
+	SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS),
+	SND_PCI_QUIRK(0x1043, 0x1993, "Asus U50F", CXT5066_ASUS),
 	SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
 	SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
@@ -3111,7 +3126,9 @@
 	SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
 	SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
+	SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
  	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
+	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
 	SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
 	{}
 };
@@ -3133,7 +3150,8 @@
 	spec->multiout.max_channels = 2;
 	spec->multiout.num_dacs = ARRAY_SIZE(cxt5066_dac_nids);
 	spec->multiout.dac_nids = cxt5066_dac_nids;
-	spec->multiout.dig_out_nid = CXT5066_SPDIF_OUT;
+	conexant_check_dig_outs(codec, cxt5066_digout_pin_nids,
+	    ARRAY_SIZE(cxt5066_digout_pin_nids));
 	spec->num_adc_nids = 1;
 	spec->adc_nids = cxt5066_adc_nids;
 	spec->capsrc_nids = cxt5066_capsrc_nids;
@@ -3167,17 +3185,20 @@
 		spec->num_init_verbs++;
 		spec->dell_automute = 1;
 		break;
+	case CXT5066_ASUS:
 	case CXT5066_HP_LAPTOP:
 		codec->patch_ops.init = cxt5066_init;
-		codec->patch_ops.unsol_event = cxt5066_hp_laptop_event;
+		codec->patch_ops.unsol_event = cxt5066_unsol_event;
 		spec->init_verbs[spec->num_init_verbs] =
 			cxt5066_init_verbs_hp_laptop;
 		spec->num_init_verbs++;
-		spec->hp_laptop = 1;
+		spec->hp_laptop = board_config == CXT5066_HP_LAPTOP;
+		spec->asus = board_config == CXT5066_ASUS;
 		spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
 		spec->mixers[spec->num_mixers++] = cxt5066_mixers;
 		/* no S/PDIF out */
-		spec->multiout.dig_out_nid = 0;
+		if (board_config == CXT5066_HP_LAPTOP)
+			spec->multiout.dig_out_nid = 0;
 		/* input source automatically selected */
 		spec->input_mux = NULL;
 		spec->port_d_mode = 0;
@@ -3207,7 +3228,7 @@
 		break;
 	case CXT5066_DELL_VOSTRO:
 		codec->patch_ops.init = cxt5066_init;
-		codec->patch_ops.unsol_event = cxt5066_vostro_event;
+		codec->patch_ops.unsol_event = cxt5066_unsol_event;
 		spec->init_verbs[0] = cxt5066_init_verbs_vostro;
 		spec->mixers[spec->num_mixers++] = cxt5066_mixer_master_olpc;
 		spec->mixers[spec->num_mixers++] = cxt5066_mixers;
@@ -3224,7 +3245,7 @@
 		break;
 	case CXT5066_IDEAPAD:
 		codec->patch_ops.init = cxt5066_init;
-		codec->patch_ops.unsol_event = cxt5066_ideapad_event;
+		codec->patch_ops.unsol_event = cxt5066_unsol_event;
 		spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
 		spec->mixers[spec->num_mixers++] = cxt5066_mixers;
 		spec->init_verbs[0] = cxt5066_init_verbs_ideapad;
@@ -3240,7 +3261,7 @@
 		break;
 	case CXT5066_THINKPAD:
 		codec->patch_ops.init = cxt5066_init;
-		codec->patch_ops.unsol_event = cxt5066_thinkpad_event;
+		codec->patch_ops.unsol_event = cxt5066_unsol_event;
 		spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
 		spec->mixers[spec->num_mixers++] = cxt5066_mixers;
 		spec->init_verbs[0] = cxt5066_init_verbs_thinkpad;
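The Conexant hunks above fold the per-model automic/unsol handlers into a single dispatcher and route the new Asus and Lenovo machines through the PCI SSID quirk table. As a rough, standalone illustration of how such a subsystem-ID table is consulted (this is a sketch, not the HDA code; the enum values and table entries are made up):

```c
#include <stdio.h>

enum board_config { CFG_AUTO, CFG_ASUS, CFG_THINKPAD, CFG_IDEAPAD };

struct quirk {
	unsigned short subvendor;
	unsigned short subdevice;	/* 0 matches any device from this vendor */
	enum board_config config;
};

static const struct quirk quirks[] = {
	{ 0x1043, 0x1643, CFG_ASUS },		/* Asus K52JU */
	{ 0x17aa, 0x21c5, CFG_THINKPAD },	/* ThinkPad Edge 13 */
	{ 0x17aa, 0x0000, CFG_IDEAPAD },	/* vendor-only fallback entry */
	{ 0, 0, CFG_AUTO }
};

static enum board_config lookup_quirk(unsigned short sv, unsigned short sd)
{
	const struct quirk *q;

	/* exact vendor+device entries are listed before the fallback */
	for (q = quirks; q->subvendor; q++)
		if (q->subvendor == sv && (!q->subdevice || q->subdevice == sd))
			return q->config;
	return CFG_AUTO;
}

int main(void)
{
	/* 0x17aa/0x38af has no exact entry, so the vendor fallback wins */
	printf("config = %d\n", lookup_quirk(0x17aa, 0x38af));
	return 0;
}
```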
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 2d5b83f..a587677 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -642,6 +642,7 @@
 			hdmi_ai->ver		= 0x01;
 			hdmi_ai->len		= 0x0a;
 			hdmi_ai->CC02_CT47	= channels - 1;
+			hdmi_ai->CA		= ca;
 			hdmi_checksum_audio_infoframe(hdmi_ai);
 		} else if (spec->sink_eld[i].conn_type == 1) { /* DisplayPort */
 			struct dp_audio_infoframe *dp_ai;
@@ -651,6 +652,7 @@
 			dp_ai->len		= 0x1b;
 			dp_ai->ver		= 0x11 << 2;
 			dp_ai->CC02_CT47	= channels - 1;
+			dp_ai->CA		= ca;
 		} else {
 			snd_printd("HDMI: unknown connection type at pin %d\n",
 				   pin_nid);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 269dbff..3328a25 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1721,7 +1721,9 @@
 {
 	struct alc_spec *spec = codec->spec;
 	int id = spec->fixup_id;
+#ifdef CONFIG_SND_DEBUG_VERBOSE
 	const char *modelname = spec->fixup_name;
+#endif
 	int depth = 0;
 
 	if (!spec->fixup_list)
@@ -2288,6 +2290,29 @@
 	{ } /* end */
 };
 
+static struct snd_kcontrol_new alc888_acer_aspire_4930g_mixer[] = {
+	HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
+	HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
+	HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
+	HDA_BIND_MUTE("Surround Playback Switch", 0x0d, 2, HDA_INPUT),
+	HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0f, 2, 0x0,
+		HDA_OUTPUT),
+	HDA_BIND_MUTE_MONO("Center Playback Switch", 0x0f, 2, 2, HDA_INPUT),
+	HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0f, 1, 0x0, HDA_OUTPUT),
+	HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0f, 1, 2, HDA_INPUT),
+	HDA_CODEC_VOLUME("Side Playback Volume", 0x0e, 0x0, HDA_OUTPUT),
+	HDA_BIND_MUTE("Side Playback Switch", 0x0e, 2, HDA_INPUT),
+	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
+	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
+	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	{ } /* end */
+};
+
+
 static struct snd_kcontrol_new alc889_acer_aspire_8930g_mixer[] = {
 	HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
@@ -10357,7 +10382,7 @@
 		.init_hook = alc_automute_amp,
 	},
 	[ALC888_ACER_ASPIRE_4930G] = {
-		.mixers = { alc888_base_mixer,
+		.mixers = { alc888_acer_aspire_4930g_mixer,
 				alc883_chmode_mixer },
 		.init_verbs = { alc883_init_verbs, alc880_gpio1_init_verbs,
 				alc888_acer_aspire_4930g_verbs },
@@ -10930,9 +10955,6 @@
 	return 0;
 }
 
-static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec,
-					     const struct auto_pin_cfg *cfg);
-
 /* almost identical with ALC880 parser... */
 static int alc882_parse_auto_config(struct hda_codec *codec)
 {
@@ -10950,10 +10972,7 @@
 	err = alc880_auto_fill_dac_nids(spec, &spec->autocfg);
 	if (err < 0)
 		return err;
-	if (codec->vendor_id == 0x10ec0887)
-		err = alc861vd_auto_create_multi_out_ctls(spec, &spec->autocfg);
-	else
-		err = alc880_auto_create_multi_out_ctls(spec, &spec->autocfg);
+	err = alc880_auto_create_multi_out_ctls(spec, &spec->autocfg);
 	if (err < 0)
 		return err;
 	err = alc880_auto_create_extra_out(spec, spec->autocfg.hp_pins[0],
@@ -12635,6 +12654,8 @@
 			   ALC262_HP_BPC),
 	SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1300, "HP xw series",
 			   ALC262_HP_BPC),
+	SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1500, "HP z series",
+			   ALC262_HP_BPC),
 	SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x1700, "HP xw series",
 			   ALC262_HP_BPC),
 	SND_PCI_QUIRK(0x103c, 0x2800, "HP D7000", ALC262_HP_BPC_D7000_WL),
@@ -14956,8 +14977,11 @@
 	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
 	SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
-	SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
+	SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
+	SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
+	SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
+	SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
 	{}
@@ -17134,7 +17158,7 @@
 #define alc861vd_idx_to_mixer_switch(nid)	((nid) + 0x0c)
 
 /* add playback controls from the parsed DAC table */
-/* Based on ALC880 version. But ALC861VD and ALC887 have separate,
+/* Based on ALC880 version. But ALC861VD has separate,
  * different NIDs for mute/unmute switch and volume control */
 static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec,
 					     const struct auto_pin_cfg *cfg)
@@ -18801,6 +18825,7 @@
 					ALC662_3ST_6ch_DIG),
 	SND_PCI_QUIRK_MASK(0x1854, 0xf000, 0x2000, "ASUS H13-200x",
 			   ALC663_ASUS_H13),
+	SND_PCI_QUIRK(0x1991, 0x5628, "Ordissimo EVE", ALC662_LENOVO_101E),
 	{}
 };
 
@@ -19461,6 +19486,7 @@
 	ALC662_FIXUP_ASPIRE,
 	ALC662_FIXUP_IDEAPAD,
 	ALC272_FIXUP_MARIO,
+	ALC662_FIXUP_CZC_P10T,
 };
 
 static const struct alc_fixup alc662_fixups[] = {
@@ -19481,14 +19507,23 @@
 	[ALC272_FIXUP_MARIO] = {
 		.type = ALC_FIXUP_FUNC,
 		.v.func = alc272_fixup_mario,
-	}
+	},
+	[ALC662_FIXUP_CZC_P10T] = {
+		.type = ALC_FIXUP_VERBS,
+		.v.verbs = (const struct hda_verb[]) {
+			{0x14, AC_VERB_SET_EAPD_BTLENABLE, 0},
+			{}
+		}
+	},
 };
 
 static struct snd_pci_quirk alc662_fixup_tbl[] = {
+	SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
 	SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
 	SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
+	SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
 	{}
 };
 
diff --git a/sound/pci/ice1712/delta.c b/sound/pci/ice1712/delta.c
index 7b62de0..20c6b07 100644
--- a/sound/pci/ice1712/delta.c
+++ b/sound/pci/ice1712/delta.c
@@ -580,6 +580,7 @@
 {
 	int err;
 	struct snd_akm4xxx *ak;
+	unsigned char tmp;
 
 	if (ice->eeprom.subvendor == ICE1712_SUBDEVICE_DELTA1010 &&
 	    ice->eeprom.gpiodir == 0x7b)
@@ -622,6 +623,12 @@
 		break;
 	}
 
+	/* initialize the SPI clock to high */
+	tmp = snd_ice1712_read(ice, ICE1712_IREG_GPIO_DATA);
+	tmp |= ICE1712_DELTA_AP_CCLK;
+	snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, tmp);
+	udelay(5);
+
 	/* initialize spdif */
 	switch (ice->eeprom.subvendor) {
 	case ICE1712_SUBDEVICE_AUDIOPHILE:
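The delta.c hunk above raises the SPI clock line with a read-modify-write of the GPIO data register, then waits for the line to settle before the SPDIF setup touches the bus. A toy sketch of the same read-modify-write idea, with the register modelled as a plain variable (the bit value and helper names are made up):

```c
#include <stdio.h>

#define DELTA_AP_CCLK 0x02		/* illustrative SPI clock bit */

static unsigned char gpio_data = 0x40;	/* pretend current register contents */

static unsigned char gpio_read(void)    { return gpio_data; }
static void gpio_write(unsigned char v) { gpio_data = v; }

int main(void)
{
	unsigned char tmp = gpio_read();

	tmp |= DELTA_AP_CCLK;	/* drive the clock high without touching other pins */
	gpio_write(tmp);
	/* the real driver follows this with udelay(5) to let the line settle */
	printf("gpio now %#x\n", gpio_data);
	return 0;
}
```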
diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h
index c2ae63d..f53897a 100644
--- a/sound/pci/oxygen/oxygen.h
+++ b/sound/pci/oxygen/oxygen.h
@@ -92,6 +92,8 @@
 	void (*update_dac_volume)(struct oxygen *chip);
 	void (*update_dac_mute)(struct oxygen *chip);
 	void (*update_center_lfe_mix)(struct oxygen *chip, bool mixed);
+	unsigned int (*adjust_dac_routing)(struct oxygen *chip,
+					   unsigned int play_routing);
 	void (*gpio_changed)(struct oxygen *chip);
 	void (*uart_input)(struct oxygen *chip);
 	void (*ac97_switch)(struct oxygen *chip,
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 9bff14d..26c7e8b 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -180,6 +180,8 @@
 			    (1 << OXYGEN_PLAY_DAC1_SOURCE_SHIFT) |
 			    (2 << OXYGEN_PLAY_DAC2_SOURCE_SHIFT) |
 			    (3 << OXYGEN_PLAY_DAC3_SOURCE_SHIFT);
+	if (chip->model.adjust_dac_routing)
+		reg_value = chip->model.adjust_dac_routing(chip, reg_value);
 	oxygen_write16_masked(chip, OXYGEN_PLAY_ROUTING, reg_value,
 			      OXYGEN_PLAY_DAC0_SOURCE_MASK |
 			      OXYGEN_PLAY_DAC1_SOURCE_MASK |
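The two oxygen hunks above add an optional adjust_dac_routing callback to the model ops and, when a model provides it, let it rewrite the default routing word before it is written to the PLAY_ROUTING register. A minimal standalone sketch of that optional-hook pattern (the field layout, shifts, and names here are illustrative, not the driver's):

```c
#include <stdio.h>

struct model_ops {
	/* optional hook: rewrite the default routing word for this model */
	unsigned int (*adjust_dac_routing)(unsigned int routing);
};

/* illustrative model hook: swap the sources feeding DAC1 and DAC2,
 * assuming 2-bit source fields at bits 8 and 16 (real shifts differ) */
static unsigned int dg_adjust(unsigned int routing)
{
	unsigned int s1 = (routing >> 8) & 0x3;
	unsigned int s2 = (routing >> 16) & 0x3;

	routing &= ~((0x3u << 8) | (0x3u << 16));
	return routing | (s2 << 8) | (s1 << 16);
}

static unsigned int build_routing(const struct model_ops *ops)
{
	unsigned int routing = (1u << 8) | (2u << 16) | (3u << 24); /* defaults */

	if (ops->adjust_dac_routing)		/* hook is optional */
		routing = ops->adjust_dac_routing(routing);
	return routing;
}

int main(void)
{
	struct model_ops dg = { .adjust_dac_routing = dg_adjust };

	printf("%#010x\n", build_routing(&dg));
	return 0;
}
```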
diff --git a/sound/pci/oxygen/xonar_cs43xx.c b/sound/pci/oxygen/xonar_cs43xx.c
index 9f72d42..2527191 100644
--- a/sound/pci/oxygen/xonar_cs43xx.c
+++ b/sound/pci/oxygen/xonar_cs43xx.c
@@ -392,7 +392,7 @@
 	unsigned int i;
 
 	snd_iprintf(buffer, "\nCS4398: 7?");
-	for (i = 2; i <= 8; ++i)
+	for (i = 2; i < 8; ++i)
 		snd_iprintf(buffer, " %02x", data->cs4398_regs[i]);
 	snd_iprintf(buffer, "\n");
 	dump_cs4362a_registers(data, buffer);
diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
index e1fa602..bc6eb58 100644
--- a/sound/pci/oxygen/xonar_dg.c
+++ b/sound/pci/oxygen/xonar_dg.c
@@ -24,6 +24,11 @@
  *
  *   SPI 0 -> CS4245
  *
+ *   I²S 1 -> CS4245
+ *   I²S 2 -> CS4361 (center/LFE)
+ *   I²S 3 -> CS4361 (surround)
+ *   I²S 4 -> CS4361 (front)
+ *
  *   GPIO 3 <- ?
  *   GPIO 4 <- headphone detect
  *   GPIO 5 -> route input jack to line-in (0) or mic-in (1)
@@ -36,6 +41,7 @@
  *   input 1 <- aux
  *   input 2 <- front mic
  *   input 4 <- line/mic
+ *   DAC out -> headphones
  *   aux out -> front panel headphones
  */
 
@@ -207,6 +213,35 @@
 	cs4245_write_cached(chip, CS4245_ADC_CTRL, value);
 }
 
+static inline unsigned int shift_bits(unsigned int value,
+				      unsigned int shift_from,
+				      unsigned int shift_to,
+				      unsigned int mask)
+{
+	if (shift_from < shift_to)
+		return (value << (shift_to - shift_from)) & mask;
+	else
+		return (value >> (shift_from - shift_to)) & mask;
+}
+
+static unsigned int adjust_dg_dac_routing(struct oxygen *chip,
+					  unsigned int play_routing)
+{
+	return (play_routing & OXYGEN_PLAY_DAC0_SOURCE_MASK) |
+	       shift_bits(play_routing,
+			  OXYGEN_PLAY_DAC2_SOURCE_SHIFT,
+			  OXYGEN_PLAY_DAC1_SOURCE_SHIFT,
+			  OXYGEN_PLAY_DAC1_SOURCE_MASK) |
+	       shift_bits(play_routing,
+			  OXYGEN_PLAY_DAC1_SOURCE_SHIFT,
+			  OXYGEN_PLAY_DAC2_SOURCE_SHIFT,
+			  OXYGEN_PLAY_DAC2_SOURCE_MASK) |
+	       shift_bits(play_routing,
+			  OXYGEN_PLAY_DAC0_SOURCE_SHIFT,
+			  OXYGEN_PLAY_DAC3_SOURCE_SHIFT,
+			  OXYGEN_PLAY_DAC3_SOURCE_MASK);
+}
+
 static int output_switch_info(struct snd_kcontrol *ctl,
 			      struct snd_ctl_elem_info *info)
 {
@@ -557,6 +592,7 @@
 	.resume = dg_resume,
 	.set_dac_params = set_cs4245_dac_params,
 	.set_adc_params = set_cs4245_adc_params,
+	.adjust_dac_routing = adjust_dg_dac_routing,
 	.dump_registers = dump_cs4245_registers,
 	.model_data_size = sizeof(struct dg),
 	.device_config = PLAYBACK_0_TO_I2S |
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.h b/sound/pcmcia/pdaudiocf/pdaudiocf.h
index bd26e09..6ce9ad7 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.h
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.h
@@ -22,7 +22,7 @@
 #define __PDAUDIOCF_H
 
 #include <sound/pcm.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <linux/interrupt.h>
 #include <pcmcia/cistpl.h>
 #include <pcmcia/ds.h>
diff --git a/sound/pcmcia/vx/vxp_ops.c b/sound/pcmcia/vx/vxp_ops.c
index 989e04a..fe33e12 100644
--- a/sound/pcmcia/vx/vxp_ops.c
+++ b/sound/pcmcia/vx/vxp_ops.c
@@ -23,8 +23,8 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/firmware.h>
+#include <linux/io.h>
 #include <sound/core.h>
-#include <asm/io.h>
 #include "vxpocket.h"
 
 
diff --git a/sound/soc/atmel/snd-soc-afeb9260.c b/sound/soc/atmel/snd-soc-afeb9260.c
index da2208e..5e4d499 100644
--- a/sound/soc/atmel/snd-soc-afeb9260.c
+++ b/sound/soc/atmel/snd-soc-afeb9260.c
@@ -129,7 +129,7 @@
 	.cpu_dai_name = "atmel-ssc-dai.0",
 	.codec_dai_name = "tlv320aic23-hifi",
 	.platform_name = "atmel_pcm-audio",
-	.codec_name = "tlv320aic23-codec.0-0x1a",
+	.codec_name = "tlv320aic23-codec.0-001a",
 	.init = afeb9260_tlv320aic23_init,
 	.ops = &afeb9260_ops,
 };
diff --git a/sound/soc/blackfin/Kconfig b/sound/soc/blackfin/Kconfig
index 3abeedd..ae40359 100644
--- a/sound/soc/blackfin/Kconfig
+++ b/sound/soc/blackfin/Kconfig
@@ -1,6 +1,7 @@
 config SND_BF5XX_I2S
 	tristate "SoC I2S Audio for the ADI BF5xx chip"
 	depends on BLACKFIN
+	select SND_BF5XX_SOC_SPORT
 	help
 	  Say Y or M if you want to add support for codecs attached to
 	  the Blackfin SPORT (synchronous serial ports) interface in I2S
@@ -35,6 +36,7 @@
 config SND_BF5XX_TDM
 	tristate "SoC I2S(TDM mode) Audio for the ADI BF5xx chip"
 	depends on (BLACKFIN && SND_SOC)
+	select SND_BF5XX_SOC_SPORT
 	help
 	  Say Y or M if you want to add support for codecs attached to
 	  the Blackfin SPORT (synchronous serial ports) interface in TDM
@@ -61,6 +63,10 @@
 config SND_BF5XX_AC97
 	tristate "SoC AC97 Audio for the ADI BF5xx chip"
 	depends on BLACKFIN
+	select AC97_BUS
+	select SND_SOC_AC97_BUS
+	select SND_BF5XX_SOC_SPORT
+	select SND_BF5XX_SOC_AC97
 	help
 	  Say Y or M if you want to add support for codecs attached to
 	  the Blackfin SPORT (synchronous serial ports) interface in slot 16
@@ -122,17 +128,12 @@
 
 config SND_BF5XX_SOC_I2S
 	tristate
-	select SND_BF5XX_SOC_SPORT
 
 config SND_BF5XX_SOC_TDM
 	tristate
-	select SND_BF5XX_SOC_SPORT
 
 config SND_BF5XX_SOC_AC97
 	tristate
-	select AC97_BUS
-	select SND_SOC_AC97_BUS
-	select SND_BF5XX_SOC_SPORT
 
 config SND_BF5XX_SPORT_NUM
 	int "Set a SPORT for Sound chip"
diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
index c5f856e..ffbac26 100644
--- a/sound/soc/blackfin/bf5xx-ac97.c
+++ b/sound/soc/blackfin/bf5xx-ac97.c
@@ -260,9 +260,9 @@
 	pr_debug("%s : sport %d\n", __func__, dai->id);
 	if (!dai->active)
 		return 0;
-	if (dai->capture.active)
+	if (dai->capture_active)
 		sport_rx_stop(sport);
-	if (dai->playback.active)
+	if (dai->playback_active)
 		sport_tx_stop(sport);
 	return 0;
 }
diff --git a/sound/soc/blackfin/bf5xx-ssm2602.c b/sound/soc/blackfin/bf5xx-ssm2602.c
index e902b24..ad28663 100644
--- a/sound/soc/blackfin/bf5xx-ssm2602.c
+++ b/sound/soc/blackfin/bf5xx-ssm2602.c
@@ -119,7 +119,7 @@
 	.cpu_dai_name = "bf5xx-i2s",
 	.codec_dai_name = "ssm2602-hifi",
 	.platform_name = "bf5xx-pcm-audio",
-	.codec_name = "ssm2602-codec.0-0x1b",
+	.codec_name = "ssm2602-codec.0-001b",
 	.ops = &bf5xx_ssm2602_ops,
 };
 
diff --git a/sound/soc/blackfin/bf5xx-tdm.c b/sound/soc/blackfin/bf5xx-tdm.c
index 1251239..5515ac9 100644
--- a/sound/soc/blackfin/bf5xx-tdm.c
+++ b/sound/soc/blackfin/bf5xx-tdm.c
@@ -210,7 +210,7 @@
 #ifdef CONFIG_PM
 static int bf5xx_tdm_suspend(struct snd_soc_dai *dai)
 {
-	struct sport_device *sport = dai->private_data;
+	struct sport_device *sport = snd_soc_dai_get_drvdata(dai);
 
 	if (!dai->active)
 		return 0;
@@ -235,13 +235,13 @@
 		ret = -EBUSY;
 	}
 
-	ret = sport_config_rx(sport, IRFS, 0x1F, 0, 0);
+	ret = sport_config_rx(sport, 0, 0x1F, 0, 0);
 	if (ret) {
 		pr_err("SPORT is busy!\n");
 		ret = -EBUSY;
 	}
 
-	ret = sport_config_tx(sport, ITFS, 0x1F, 0, 0);
+	ret = sport_config_tx(sport, 0, 0x1F, 0, 0);
 	if (ret) {
 		pr_err("SPORT is busy!\n");
 		ret = -EBUSY;
@@ -303,14 +303,14 @@
 		goto sport_config_err;
 	}
 
-	ret = sport_config_rx(sport_handle, IRFS, 0x1F, 0, 0);
+	ret = sport_config_rx(sport_handle, 0, 0x1F, 0, 0);
 	if (ret) {
 		pr_err("SPORT is busy!\n");
 		ret = -EBUSY;
 		goto sport_config_err;
 	}
 
-	ret = sport_config_tx(sport_handle, ITFS, 0x1F, 0, 0);
+	ret = sport_config_tx(sport_handle, 0, 0x1F, 0, 0);
 	if (ret) {
 		pr_err("SPORT is busy!\n");
 		ret = -EBUSY;
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c
index 46dbfd0..347a567 100644
--- a/sound/soc/codecs/cq93vc.c
+++ b/sound/soc/codecs/cq93vc.c
@@ -153,7 +153,7 @@
 
 static int cq93vc_probe(struct snd_soc_codec *codec)
 {
-	struct davinci_vc *davinci_vc = codec->dev->platform_data;
+	struct davinci_vc *davinci_vc = snd_soc_codec_get_drvdata(codec);
 
 	davinci_vc->cq93vc.codec = codec;
 	codec->control_data = davinci_vc;
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index 03d1e86..bb4bf65 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -367,9 +367,12 @@
 	return 0;
 }
 
+static const u8 cx20442_reg = CX20442_TELOUT | CX20442_MIC;
+
 static struct snd_soc_codec_driver cx20442_codec_dev = {
 	.probe = 	cx20442_codec_probe,
 	.remove = 	cx20442_codec_remove,
+	.reg_cache_default = &cx20442_reg,
 	.reg_cache_size = 1,
 	.reg_word_size = sizeof(u8),
 	.read = cx20442_read_reg_cache,
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 247a6a9..37b8aa8 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -1287,9 +1287,9 @@
 SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0),
 
-SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", "AIF1 Capture",
+SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL,
 		     0, WM8994_POWER_MANAGEMENT_4, 9, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", "AIF1 Capture",
+SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL,
 		     0, WM8994_POWER_MANAGEMENT_4, 8, 0),
 SND_SOC_DAPM_AIF_IN_E("AIF1DAC1L", NULL, 0,
 		      WM8994_POWER_MANAGEMENT_5, 9, 0, wm8958_aif_ev,
@@ -1298,9 +1298,9 @@
 		      WM8994_POWER_MANAGEMENT_5, 8, 0, wm8958_aif_ev,
 		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
-SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", "AIF1 Capture",
+SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", NULL,
 		     0, WM8994_POWER_MANAGEMENT_4, 11, 0),
-SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", "AIF1 Capture",
+SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", NULL,
 		     0, WM8994_POWER_MANAGEMENT_4, 10, 0),
 SND_SOC_DAPM_AIF_IN_E("AIF1DAC2L", NULL, 0,
 		      WM8994_POWER_MANAGEMENT_5, 11, 0, wm8958_aif_ev,
@@ -1345,6 +1345,7 @@
 
 SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
 SND_SOC_DAPM_AIF_IN("AIF2DACDAT", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1ADCDAT", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
 SND_SOC_DAPM_AIF_OUT("AIF2ADCDAT", "AIF2 Capture", 0, SND_SOC_NOPM, 0, 0),
 
 SND_SOC_DAPM_MUX("AIF1DAC Mux", SND_SOC_NOPM, 0, 0, &aif1dac_mux),
@@ -1546,6 +1547,11 @@
 	{ "AIF2DAC2R Mixer", "Left Sidetone Switch", "Left Sidetone" },
 	{ "AIF2DAC2R Mixer", "Right Sidetone Switch", "Right Sidetone" },
 
+	{ "AIF1ADCDAT", NULL, "AIF1ADC1L" },
+	{ "AIF1ADCDAT", NULL, "AIF1ADC1R" },
+	{ "AIF1ADCDAT", NULL, "AIF1ADC2L" },
+	{ "AIF1ADCDAT", NULL, "AIF1ADC2R" },
+
 	{ "AIF2ADCDAT", NULL, "AIF2ADC Mux" },
 
 	/* AIF3 output */
@@ -1578,6 +1584,13 @@
 	{ "Right Headphone Mux", "DAC", "DAC1R" },
 };
 
+static const struct snd_soc_dapm_route wm8994_revd_intercon[] = {
+	{ "AIF1DACDAT", NULL, "AIF2DACDAT" },
+	{ "AIF2DACDAT", NULL, "AIF1DACDAT" },
+	{ "AIF1ADCDAT", NULL, "AIF2ADCDAT" },
+	{ "AIF2ADCDAT", NULL, "AIF1ADCDAT" },
+};
+
 static const struct snd_soc_dapm_route wm8994_intercon[] = {
 	{ "AIF2DACL", NULL, "AIF2DAC Mux" },
 	{ "AIF2DACR", NULL, "AIF2DAC Mux" },
@@ -2386,7 +2399,7 @@
 	else
 		val = 0;
 
-	return snd_soc_update_bits(codec, reg, mask, reg);
+	return snd_soc_update_bits(codec, reg, mask, val);
 }
 
 #define WM8994_RATES SNDRV_PCM_RATE_8000_96000
@@ -3129,6 +3142,11 @@
 	case WM8994:
 		snd_soc_dapm_add_routes(dapm, wm8994_intercon,
 					ARRAY_SIZE(wm8994_intercon));
+
+		if (wm8994->revision < 4)
+			snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon,
+						ARRAY_SIZE(wm8994_revd_intercon));
+			
 		break;
 	case WM8958:
 		snd_soc_dapm_add_routes(dapm, wm8958_intercon,
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
index 6045cbd..608c84c 100644
--- a/sound/soc/codecs/wm8995.c
+++ b/sound/soc/codecs/wm8995.c
@@ -1223,7 +1223,7 @@
 	else
 		val = 0;
 
-	return snd_soc_update_bits(codec, reg, mask, reg);
+	return snd_soc_update_bits(codec, reg, mask, val);
 }
 
 /* The size in bits of the FLL divide multiplied by 10
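Both the wm8994.c and wm8995.c hunks fix the same slip: the register index was passed to snd_soc_update_bits() where the new value belongs, so the masked field was never set as intended. A standalone sketch of a generic update-bits helper makes the argument order, and the effect of the bug, visible (the helper and register array are illustrative, not the ASoC API):

```c
#include <stdio.h>

static unsigned int regs[256];

/* returns 1 if the register contents actually changed */
static int update_bits(unsigned int reg, unsigned int mask, unsigned int val)
{
	unsigned int old = regs[reg];
	unsigned int upd = (old & ~mask) | (val & mask);

	if (upd == old)
		return 0;
	regs[reg] = upd;
	return 1;
}

int main(void)
{
	unsigned int reg = 0x30, mask = 0x1, val = 1;

	/* correct: write the computed value into the masked field */
	update_bits(reg, mask, val);
	/* the old code effectively did update_bits(reg, mask, reg),
	 * i.e. it wrote 0x30 & 0x1 = 0 instead of the intended 1 */
	printf("reg 0x30 = %u\n", regs[0x30]);
	return 0;
}
```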
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index c466982..613df5d 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -91,6 +91,7 @@
 static void calibrate_dc_servo(struct snd_soc_codec *codec)
 {
 	struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
+	s8 offset;
 	u16 reg, reg_l, reg_r, dcs_cfg;
 
 	/* If we're using a digital only path and have a previously
@@ -149,16 +150,14 @@
 			hubs->dcs_codes);
 
 		/* HPOUT1L */
-		if (reg_l + hubs->dcs_codes > 0 &&
-		    reg_l + hubs->dcs_codes < 0xff)
-			reg_l += hubs->dcs_codes;
-		dcs_cfg = reg_l << WM8993_DCS_DAC_WR_VAL_1_SHIFT;
+		offset = reg_l;
+		offset += hubs->dcs_codes;
+		dcs_cfg = (u8)offset << WM8993_DCS_DAC_WR_VAL_1_SHIFT;
 
 		/* HPOUT1R */
-		if (reg_r + hubs->dcs_codes > 0 &&
-		    reg_r + hubs->dcs_codes < 0xff)
-			reg_r += hubs->dcs_codes;
-		dcs_cfg |= reg_r;
+		offset = reg_r;
+		offset += hubs->dcs_codes;
+		dcs_cfg |= (u8)offset;
 
 		dev_dbg(codec->dev, "DCS result: %x\n", dcs_cfg);
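The wm_hubs.c change applies the DC-servo correction as signed 8-bit arithmetic and packs the result straight back into the register word, rather than silently skipping corrections whose sum falls outside 0..0xff. A rough standalone model of that packing (the field layout below is assumed for illustration, not the codec's register map):

```c
#include <stdio.h>
#include <stdint.h>

static uint16_t pack_dcs(uint8_t raw_l, uint8_t raw_r, int codes)
{
	int8_t offset;
	uint16_t dcs_cfg;

	offset = (int8_t)raw_l;		/* readback treated as signed */
	offset += codes;		/* may go negative and wrap */
	dcs_cfg = (uint16_t)((uint8_t)offset) << 8;	/* left value, high byte */

	offset = (int8_t)raw_r;
	offset += codes;
	dcs_cfg |= (uint8_t)offset;	/* right value, low byte */

	return dcs_cfg;
}

int main(void)
{
	/* e.g. a left readback of 0x03 with a -5 correction wraps to 0xfe */
	printf("%#06x\n", pack_dcs(0x03, 0x90, -5));
	return 0;
}
```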
 
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index 0c2d6ba..fe79842 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -218,12 +218,24 @@
 		.ops = &evm_spdif_ops,
 	},
 };
-static struct snd_soc_dai_link da8xx_evm_dai = {
+
+static struct snd_soc_dai_link da830_evm_dai = {
+	.name = "TLV320AIC3X",
+	.stream_name = "AIC3X",
+	.cpu_dai_name = "davinci-mcasp.1",
+	.codec_dai_name = "tlv320aic3x-hifi",
+	.codec_name = "tlv320aic3x-codec.1-0018",
+	.platform_name = "davinci-pcm-audio",
+	.init = evm_aic3x_init,
+	.ops = &evm_ops,
+};
+
+static struct snd_soc_dai_link da850_evm_dai = {
 	.name = "TLV320AIC3X",
 	.stream_name = "AIC3X",
 	.cpu_dai_name= "davinci-mcasp.0",
 	.codec_dai_name = "tlv320aic3x-hifi",
-	.codec_name = "tlv320aic3x-codec.0-001a",
+	.codec_name = "tlv320aic3x-codec.1-0018",
 	.platform_name = "davinci-pcm-audio",
 	.init = evm_aic3x_init,
 	.ops = &evm_ops,
@@ -259,13 +271,13 @@
 
 static struct snd_soc_card da830_snd_soc_card = {
 	.name = "DA830/OMAP-L137 EVM",
-	.dai_link = &da8xx_evm_dai,
+	.dai_link = &da830_evm_dai,
 	.num_links = 1,
 };
 
 static struct snd_soc_card da850_snd_soc_card = {
 	.name = "DA850/OMAP-L138 EVM",
-	.dai_link = &da8xx_evm_dai,
+	.dai_link = &da850_evm_dai,
 	.num_links = 1,
 };
 
diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/omap/ams-delta.c
index 2101bdc..3167be6 100644
--- a/sound/soc/omap/ams-delta.c
+++ b/sound/soc/omap/ams-delta.c
@@ -507,8 +507,6 @@
 	/* Set up digital mute if not provided by the codec */
 	if (!codec_dai->driver->ops) {
 		codec_dai->driver->ops = &ams_delta_dai_ops;
-	} else if (!codec_dai->driver->ops->digital_mute) {
-		codec_dai->driver->ops->digital_mute = ams_delta_digital_mute;
 	} else {
 		ams_delta_ops.startup = ams_delta_startup;
 		ams_delta_ops.shutdown = ams_delta_shutdown;
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index fc592f0..784cff5 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -307,10 +307,10 @@
 static struct snd_soc_dai_link corgi_dai = {
 	.name = "WM8731",
 	.stream_name = "WM8731",
-	.cpu_dai_name = "pxa-is2-dai",
+	.cpu_dai_name = "pxa2xx-i2s",
 	.codec_dai_name = "wm8731-hifi",
 	.platform_name = "pxa-pcm-audio",
-	.codec_name = "wm8731-codec-0.001a",
+	.codec_name = "wm8731-codec-0.001b",
 	.init = corgi_wm8731_init,
 	.ops = &corgi_ops,
 };
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index 6298ee1..a7d4999 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -276,7 +276,7 @@
 	.cpu_dai_name = "pxa2xx-i2s",
 	.codec_dai_name = "wm8731-hifi",
 	.platform_name = "pxa-pcm-audio",
-	.codec_name = "wm8731-codec.0-001a",
+	.codec_name = "wm8731-codec.0-001b",
 	.init = poodle_wm8731_init,
 	.ops = &poodle_ops,
 };
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c
index c2acb69..8e15713 100644
--- a/sound/soc/pxa/spitz.c
+++ b/sound/soc/pxa/spitz.c
@@ -315,10 +315,10 @@
 static struct snd_soc_dai_link spitz_dai = {
 	.name = "wm8750",
 	.stream_name = "WM8750",
-	.cpu_dai_name = "pxa-is2",
+	.cpu_dai_name = "pxa2xx-i2s",
 	.codec_dai_name = "wm8750-hifi",
 	.platform_name = "pxa-pcm-audio",
-	.codec_name = "wm8750-codec.0-001a",
+	.codec_name = "wm8750-codec.0-001b",
 	.init = spitz_wm8750_init,
 	.ops = &spitz_ops,
 };
diff --git a/sound/soc/pxa/z2.c b/sound/soc/pxa/z2.c
index 2d4f896..3ceaef6 100644
--- a/sound/soc/pxa/z2.c
+++ b/sound/soc/pxa/z2.c
@@ -104,6 +104,7 @@
 		.name		= "hsdet-gpio",
 		.report		= SND_JACK_HEADSET,
 		.debounce_time	= 200,
+		.invert		= 1,
 	},
 };
 
@@ -192,7 +193,7 @@
 	.cpu_dai_name	= "pxa2xx-i2s",
 	.codec_dai_name	= "wm8750-hifi",
 	.platform_name = "pxa-pcm-audio",
-	.codec_name	= "wm8750-codec.0-001a",
+	.codec_name	= "wm8750-codec.0-001b",
 	.init		= z2_wm8750_init,
 	.ops		= &z2_ops,
 };
diff --git a/sound/soc/samsung/neo1973_gta02_wm8753.c b/sound/soc/samsung/neo1973_gta02_wm8753.c
index 3eec610..0d0ae2b 100644
--- a/sound/soc/samsung/neo1973_gta02_wm8753.c
+++ b/sound/soc/samsung/neo1973_gta02_wm8753.c
@@ -397,11 +397,11 @@
 { /* Hifi Playback - for simultaneous use with voice below */

 	.name = "WM8753",
 	.stream_name = "WM8753 HiFi",
-	.cpu_dai_name = "s3c24xx-i2s",
+	.cpu_dai_name = "s3c24xx-iis",
 	.codec_dai_name = "wm8753-hifi",
 	.init = neo1973_gta02_wm8753_init,
 	.platform_name = "samsung-audio",
-	.codec_name = "wm8753-codec.0-0x1a",
+	.codec_name = "wm8753-codec.0-001a",
 	.ops = &neo1973_gta02_hifi_ops,
 },
 { /* Voice via BT */
@@ -410,7 +410,7 @@
 	.cpu_dai_name = "bluetooth-dai",
 	.codec_dai_name = "wm8753-voice",
 	.ops = &neo1973_gta02_voice_ops,
-	.codec_name = "wm8753-codec.0-0x1a",
+	.codec_name = "wm8753-codec.0-001a",
 	.platform_name = "samsung-audio",
 },
 };
diff --git a/sound/soc/samsung/neo1973_wm8753.c b/sound/soc/samsung/neo1973_wm8753.c
index c7a2451..d20815d 100644
--- a/sound/soc/samsung/neo1973_wm8753.c
+++ b/sound/soc/samsung/neo1973_wm8753.c
@@ -559,9 +559,9 @@
 	.name = "WM8753",
 	.stream_name = "WM8753 HiFi",
 	.platform_name = "samsung-audio",
-	.cpu_dai_name = "s3c24xx-i2s",
+	.cpu_dai_name = "s3c24xx-iis",
 	.codec_dai_name = "wm8753-hifi",
-	.codec_name = "wm8753-codec.0-0x1a",
+	.codec_name = "wm8753-codec.0-001a",
 	.init = neo1973_wm8753_init,
 	.ops = &neo1973_hifi_ops,
 },
@@ -571,7 +571,7 @@
 	.platform_name = "samsung-audio",
 	.cpu_dai_name = "bluetooth-dai",
 	.codec_dai_name = "wm8753-voice",
-	.codec_name = "wm8753-codec.0-0x1a",
+	.codec_name = "wm8753-codec.0-001a",
 	.ops = &neo1973_voice_ops,
 },
 };
diff --git a/sound/soc/samsung/s3c24xx_simtec_hermes.c b/sound/soc/samsung/s3c24xx_simtec_hermes.c
index bb4292e..08fcaaa 100644
--- a/sound/soc/samsung/s3c24xx_simtec_hermes.c
+++ b/sound/soc/samsung/s3c24xx_simtec_hermes.c
@@ -94,8 +94,8 @@
 static struct snd_soc_dai_link simtec_dai_aic33 = {
 	.name		= "tlv320aic33",
 	.stream_name	= "TLV320AIC33",
-	.codec_name	= "tlv320aic3x-codec.0-0x1a",
-	.cpu_dai_name	= "s3c24xx-i2s",
+	.codec_name	= "tlv320aic3x-codec.0-001a",
+	.cpu_dai_name	= "s3c24xx-iis",
 	.codec_dai_name = "tlv320aic3x-hifi",
 	.platform_name	= "samsung-audio",
 	.init		= simtec_hermes_init,
diff --git a/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c b/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
index fbba4e3..116e3e6 100644
--- a/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
+++ b/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
@@ -85,8 +85,8 @@
 static struct snd_soc_dai_link simtec_dai_aic23 = {
 	.name		= "tlv320aic23",
 	.stream_name	= "TLV320AIC23",
-	.codec_name	= "tlv320aic3x-codec.0-0x1a",
-	.cpu_dai_name	= "s3c24xx-i2s",
+	.codec_name	= "tlv320aic3x-codec.0-001a",
+	.cpu_dai_name	= "s3c24xx-iis",
 	.codec_dai_name = "tlv320aic3x-hifi",
 	.platform_name	= "samsung-audio",
 	.init		= simtec_tlv320aic23_init,
diff --git a/sound/soc/samsung/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
index cdc8ecb..2c09e93 100644
--- a/sound/soc/samsung/s3c24xx_uda134x.c
+++ b/sound/soc/samsung/s3c24xx_uda134x.c
@@ -228,7 +228,7 @@
 	.stream_name = "UDA134X",
 	.codec_name = "uda134x-hifi",
 	.codec_dai_name = "uda134x-hifi",
-	.cpu_dai_name = "s3c24xx-i2s",
+	.cpu_dai_name = "s3c24xx-iis",
 	.ops = &s3c24xx_uda134x_ops,
 	.platform_name	= "samsung-audio",
 };
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index bac7291..c3f6f1e 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1449,6 +1449,7 @@
 		rtd = &card->rtd_aux[num];
 		name = aux_dev->name;
 	}
+	rtd->card = card;
 
 	/* machine controls, routes and widgets are not prefixed */
 	temp = codec->name_prefix;
@@ -1471,7 +1472,6 @@
 
 	/* register the rtd device */
 	rtd->codec = codec;
-	rtd->card = card;
 	rtd->dev.parent = card->dev;
 	rtd->dev.release = rtd_release;
 	rtd->dev.init_name = name;
@@ -1664,9 +1664,6 @@
 	goto out;
 
 found:
-	if (!try_module_get(codec->dev->driver->owner))
-		return -ENODEV;
-
 	ret = soc_probe_codec(card, codec);
 	if (ret < 0)
 		return ret;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 499730a..8194f15 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1742,7 +1742,7 @@
 	int max = mc->max;
 	unsigned int mask = (1 << fls(max)) - 1;
 	unsigned int invert = mc->invert;
-	unsigned int val, val_mask;
+	unsigned int val;
 	int connect, change;
 	struct snd_soc_dapm_update update;
 
@@ -1750,13 +1750,13 @@
 
 	if (invert)
 		val = max - val;
-	val_mask = mask << shift;
+	mask = mask << shift;
 	val = val << shift;
 
 	mutex_lock(&widget->codec->mutex);
 	widget->value = val;
 
-	change = snd_soc_test_bits(widget->codec, reg, val_mask, val);
+	change = snd_soc_test_bits(widget->codec, reg, mask, val);
 	if (change) {
 		if (val)
 			/* new connection */
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 7df89b3..85af605 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -95,7 +95,7 @@
 };
 
 
-/*E-mu 0202(0404) eXtension Unit(XU) control*/
+/*E-mu 0202/0404/0204 eXtension Unit(XU) control*/
 enum {
 	USB_XU_CLOCK_RATE 		= 0xe301,
 	USB_XU_CLOCK_SOURCE		= 0xe302,
@@ -1566,7 +1566,7 @@
 			cval->initialized = 1;
 		} else {
 			if (type == USB_XU_CLOCK_RATE) {
-				/* E-Mu USB 0404/0202/TrackerPre
+				/* E-Mu USB 0404/0202/TrackerPre/0204
 				 * samplerate control quirk
 				 */
 				cval->min = 0;
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 3599987..921a86f 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -79,6 +79,13 @@
 	.idProduct = 0x3f0a,
 	.bInterfaceClass = USB_CLASS_AUDIO,
 },
+{
+	/* E-Mu 0204 USB */
+	.match_flags = USB_DEVICE_ID_MATCH_DEVICE,
+	.idVendor = 0x041e,
+	.idProduct = 0x3f19,
+	.bInterfaceClass = USB_CLASS_AUDIO,
+},
 
 /*
  * Logitech QuickCam: bDeviceClass is vendor-specific, so generic interface
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index cf8bf08..e314cdb 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -532,7 +532,7 @@
 }
 
 /*
- * For E-Mu 0404USB/0202USB/TrackerPre sample rate should be set for device,
+ * For E-Mu 0404USB/0202USB/TrackerPre/0204 sample rate should be set for device,
  * not for interface.
  */
 
@@ -589,6 +589,7 @@
 	case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
 	case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
 	case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
+	case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
 		set_format_emu_quirk(subs, fmt);
 		break;
 	}
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 2b5387d..7141c42 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -204,13 +204,11 @@
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Winit-self
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wpacked
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wredundant-decls
-EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstack-protector
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wstrict-aliasing=3
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-default
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wswitch-enum
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wno-system-headers
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wundef
-EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wvolatile-register-var
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wwrite-strings
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wbad-function-cast
 EXTRA_WARNINGS := $(EXTRA_WARNINGS) -Wmissing-declarations
@@ -294,6 +292,13 @@
 	CFLAGS := $(CFLAGS) -fstack-protector-all
 endif
 
+ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -Wstack-protector),y)
+       CFLAGS := $(CFLAGS) -Wstack-protector
+endif
+
+ifeq ($(call try-cc,$(SOURCE_HELLO),-Werror -Wvolatile-register-var),y)
+       CFLAGS := $(CFLAGS) -Wvolatile-register-var
+endif
 
 ### --- END CONFIGURATION SECTION ---
 
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index c056cdc..8879463 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -212,7 +212,7 @@
 			continue;
 
 		offset = start + i;
-		sprintf(cmd, "addr2line -e %s %016llx", filename, offset);
+		sprintf(cmd, "addr2line -e %s %016" PRIx64, filename, offset);
 		fp = popen(cmd, "r");
 		if (!fp)
 			continue;
@@ -270,9 +270,9 @@
 
 	for (offset = 0; offset < len; ++offset)
 		if (h->ip[offset] != 0)
-			printf("%*Lx: %Lu\n", BITS_PER_LONG / 2,
+			printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
 			       sym->start + offset, h->ip[offset]);
-	printf("%*s: %Lu\n", BITS_PER_LONG / 2, "h->sum", h->sum);
+	printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
 }
 
 static int hist_entry__tty_annotate(struct hist_entry *he)
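This and the following perf hunks replace printf conversions such as "%Lu"/"%Lx" (which are glibc-specific and warn on 32-bit builds) with the <inttypes.h> PRIu64/PRIx64 macros, which expand to the correct length modifier for a 64-bit integer on every platform. A minimal example of the idiom:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr  = 0xffffffff81000000ULL;
	uint64_t count = 1234567890123ULL;

	/* PRIx64/PRIu64 splice the right modifier into the format string */
	printf("addr:  %#018" PRIx64 "\n", addr);
	printf("count: %" PRIu64 "\n", count);
	return 0;
}
```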
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index def7ddc..d97256d 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -371,10 +371,10 @@
 			addr = data->ptr;
 
 		if (sym != NULL)
-			snprintf(buf, sizeof(buf), "%s+%Lx", sym->name,
+			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
 				 addr - map->unmap_ip(map, sym->start));
 		else
-			snprintf(buf, sizeof(buf), "%#Lx", addr);
+			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
 		printf(" %-34s |", buf);
 
 		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index b9c6e54..2b36def 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -782,9 +782,9 @@
 		pr_info("%10u ", st->nr_acquired);
 		pr_info("%10u ", st->nr_contended);
 
-		pr_info("%15llu ", st->wait_time_total);
-		pr_info("%15llu ", st->wait_time_max);
-		pr_info("%15llu ", st->wait_time_min == ULLONG_MAX ?
+		pr_info("%15" PRIu64 " ", st->wait_time_total);
+		pr_info("%15" PRIu64 " ", st->wait_time_max);
+		pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ?
 		       0 : st->wait_time_min);
 		pr_info("\n");
 	}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index fcd29e8..60cac6f 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -759,8 +759,8 @@
 		perf_session__process_machines(session, event__synthesize_guest_os);
 
 	if (!system_wide)
-		event__synthesize_thread(target_tid, process_synthesized_event,
-					 session);
+		event__synthesize_thread_map(threads, process_synthesized_event,
+					     session);
 	else
 		event__synthesize_threads(process_synthesized_event, session);
 
@@ -817,7 +817,7 @@
 	 * Approximate RIP event size: 24 bytes.
 	 */
 	fprintf(stderr,
-		"[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n",
+		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
 		(double)bytes_written / 1024.0 / 1024.0,
 		output_name,
 		bytes_written / 24);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 75183a4..c27e31f 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -197,7 +197,7 @@
 					   event->read.value);
 	}
 
-	dump_printf(": %d %d %s %Lu\n", event->read.pid, event->read.tid,
+	dump_printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
 		    attr ? __event_name(attr->type, attr->config) : "FAIL",
 		    event->read.value);
 
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 29e7ffd..29acb89 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -193,7 +193,7 @@
 	}
 	run_measurement_overhead = min_delta;
 
-	printf("run measurement overhead: %Ld nsecs\n", min_delta);
+	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
 }
 
 static void calibrate_sleep_measurement_overhead(void)
@@ -211,7 +211,7 @@
 	min_delta -= 10000;
 	sleep_measurement_overhead = min_delta;
 
-	printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
+	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
 }
 
 static struct sched_atom *
@@ -617,13 +617,13 @@
 	burn_nsecs(1e6);
 	T1 = get_nsecs();
 
-	printf("the run test took %Ld nsecs\n", T1-T0);
+	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
 
 	T0 = get_nsecs();
 	sleep_nsecs(1e6);
 	T1 = get_nsecs();
 
-	printf("the sleep test took %Ld nsecs\n", T1-T0);
+	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
 }
 
 #define FILL_FIELD(ptr, field, event, data)	\
@@ -816,10 +816,10 @@
 		delta = 0;
 
 	if (delta < 0)
-		die("hm, delta: %Ld < 0 ?\n", delta);
+		die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
 	if (verbose) {
-		printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
+		printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
 			switch_event->prev_comm, switch_event->prev_pid,
 			switch_event->next_comm, switch_event->next_pid,
 			delta);
@@ -1048,7 +1048,7 @@
 		delta = 0;
 
 	if (delta < 0)
-		die("hm, delta: %Ld < 0 ?\n", delta);
+		die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
 
 	sched_out = perf_session__findnew(session, switch_event->prev_pid);
@@ -1221,7 +1221,7 @@
 
 	avg = work_list->total_lat / work_list->nb_atoms;
 
-	printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
+	printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
 	      (double)work_list->total_runtime / 1e6,
 		 work_list->nb_atoms, (double)avg / 1e6,
 		 (double)work_list->max_lat / 1e6,
@@ -1423,7 +1423,7 @@
 		delta = 0;
 
 	if (delta < 0)
-		die("hm, delta: %Ld < 0 ?\n", delta);
+		die("hm, delta: %" PRIu64 " < 0 ?\n", delta);
 
 
 	sched_out = perf_session__findnew(session, switch_event->prev_pid);
@@ -1713,7 +1713,7 @@
 	}
 
 	printf(" -----------------------------------------------------------------------------------------\n");
-	printf("  TOTAL:                |%11.3f ms |%9Ld |\n",
+	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
 		(double)all_runtime/1e6, all_count);
 
 	printf(" ---------------------------------------------------\n");
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 150a606..b766c2a 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -77,8 +77,8 @@
 	if (session->sample_type & PERF_SAMPLE_RAW) {
 		if (debug_mode) {
 			if (sample->time < last_timestamp) {
-				pr_err("Samples misordered, previous: %llu "
-					"this: %llu\n", last_timestamp,
+				pr_err("Samples misordered, previous: %" PRIu64
+					" this: %" PRIu64 "\n", last_timestamp,
 					sample->time);
 				nr_unordered++;
 			}
@@ -126,7 +126,7 @@
 	ret = perf_session__process_events(session, &event_ops);
 
 	if (debug_mode)
-		pr_err("Misordered timestamps: %llu\n", nr_unordered);
+		pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
 
 	return ret;
 }
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 0ff11d9..a482a19 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -206,8 +206,8 @@
 		update_stats(&ps->res_stats[i], count[i]);
 
 	if (verbose) {
-		fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter),
-				count[0], count[1], count[2]);
+		fprintf(stderr, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
+			event_name(counter), count[0], count[1], count[2]);
 	}
 
 	/*
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index ed56961..5dcdba6 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -146,7 +146,7 @@
 				if (llabs(skew) < page_size)
 					continue;
 
-				pr_debug("%#Lx: diff end addr for %s v: %#Lx k: %#Lx\n",
+				pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
 					 sym->start, sym->name, sym->end, pair->end);
 			} else {
 				struct rb_node *nnd;
@@ -168,11 +168,11 @@
 					goto detour;
 				}
 
-				pr_debug("%#Lx: diff name v: %s k: %s\n",
+				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
 					 sym->start, sym->name, pair->name);
 			}
 		} else
-			pr_debug("%#Lx: %s not on kallsyms\n", sym->start, sym->name);
+			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);
 
 		err = -1;
 	}
@@ -211,10 +211,10 @@
 
 		if (pair->start == pos->start) {
 			pair->priv = 1;
-			pr_info(" %Lx-%Lx %Lx %s in kallsyms as",
+			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
 				pos->start, pos->end, pos->pgoff, pos->dso->name);
 			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
-				pr_info(": \n*%Lx-%Lx %Lx",
+				pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
 					pair->start, pair->end, pair->pgoff);
 			pr_info(" %s\n", pair->dso->name);
 			pair->priv = 1;
@@ -307,7 +307,7 @@
 	}
 
 	if (evsel->counts->cpu[0].val != nr_open_calls) {
-		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %Ld\n",
+		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
 			 nr_open_calls, evsel->counts->cpu[0].val);
 		goto out_close_fd;
 	}
@@ -332,8 +332,7 @@
 	struct perf_evsel *evsel;
 	struct perf_event_attr attr;
 	unsigned int nr_open_calls = 111, i;
-	cpu_set_t *cpu_set;
-	size_t cpu_set_size;
+	cpu_set_t cpu_set;
 	int id = trace_event__id("sys_enter_open");
 
 	if (id < 0) {
@@ -353,13 +352,8 @@
 		return -1;
 	}
 
-	cpu_set = CPU_ALLOC(cpus->nr);
 
-	if (cpu_set == NULL)
-		goto out_thread_map_delete;
-
-	cpu_set_size = CPU_ALLOC_SIZE(cpus->nr);
-	CPU_ZERO_S(cpu_set_size, cpu_set);
+	CPU_ZERO(&cpu_set);
 
 	memset(&attr, 0, sizeof(attr));
 	attr.type = PERF_TYPE_TRACEPOINT;
@@ -367,7 +361,7 @@
 	evsel = perf_evsel__new(&attr, 0);
 	if (evsel == NULL) {
 		pr_debug("perf_evsel__new\n");
-		goto out_cpu_free;
+		goto out_thread_map_delete;
 	}
 
 	if (perf_evsel__open(evsel, cpus, threads) < 0) {
@@ -379,14 +373,29 @@
 
 	for (cpu = 0; cpu < cpus->nr; ++cpu) {
 		unsigned int ncalls = nr_open_calls + cpu;
+		/*
+		 * XXX eventually lift this restriction in a way that
+		 * keeps perf building on older glibc installations
+		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
+		 * a reasonable upper limit tho :-)
+		 */
+		if (cpus->map[cpu] >= CPU_SETSIZE) {
+			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
+			continue;
+		}
 
-		CPU_SET(cpu, cpu_set);
-		sched_setaffinity(0, cpu_set_size, cpu_set);
+		CPU_SET(cpus->map[cpu], &cpu_set);
+		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
+			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
+				 cpus->map[cpu],
+				 strerror(errno));
+			goto out_close_fd;
+		}
 		for (i = 0; i < ncalls; ++i) {
 			fd = open("/etc/passwd", O_RDONLY);
 			close(fd);
 		}
-		CPU_CLR(cpu, cpu_set);
+		CPU_CLR(cpus->map[cpu], &cpu_set);
 	}
 
 	/*
@@ -402,6 +411,9 @@
 	for (cpu = 0; cpu < cpus->nr; ++cpu) {
 		unsigned int expected;
 
+		if (cpus->map[cpu] >= CPU_SETSIZE)
+			continue;
+
 		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
 			pr_debug("perf_evsel__open_read_on_cpu\n");
 			goto out_close_fd;
@@ -409,8 +421,8 @@
 
 		expected = nr_open_calls + cpu;
 		if (evsel->counts->cpu[cpu].val != expected) {
-			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %Ld\n",
-				 expected, cpu, evsel->counts->cpu[cpu].val);
+			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
+				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
 			goto out_close_fd;
 		}
 	}
@@ -420,8 +432,6 @@
 	perf_evsel__close_fd(evsel, 1, threads->nr);
 out_evsel_delete:
 	perf_evsel__delete(evsel);
-out_cpu_free:
-	CPU_FREE(cpu_set);
 out_thread_map_delete:
 	thread_map__delete(threads);
 	return err;
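The builtin-test.c hunk above drops CPU_ALLOC() in favour of a fixed-size cpu_set_t, skips CPUs that cannot be represented within CPU_SETSIZE, and checks the sched_setaffinity() return value instead of ignoring it. A self-contained sketch of that affinity pattern (the helper name below is made up):

```c
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

static int run_on_cpu(int cpu)
{
	cpu_set_t set;

	if (cpu >= CPU_SETSIZE)		/* cannot represent this CPU, skip it */
		return -1;

	CPU_ZERO(&set);
	CPU_SET(cpu, &set);
	if (sched_setaffinity(0, sizeof(set), &set) < 0) {
		fprintf(stderr, "sched_setaffinity on CPU %d: %s\n",
			cpu, strerror(errno));
		return -1;
	}
	return 0;
}

int main(void)
{
	if (run_on_cpu(0) == 0)
		printf("now pinned to CPU 0\n");
	return 0;
}
```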
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 05344c6..5a29d9c 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -40,6 +40,7 @@
 #include <stdio.h>
 #include <termios.h>
 #include <unistd.h>
+#include <inttypes.h>
 
 #include <errno.h>
 #include <time.h>
@@ -214,7 +215,7 @@
 	len = sym->end - sym->start;
 
 	sprintf(command,
-		"objdump --start-address=%#0*Lx --stop-address=%#0*Lx -dS %s",
+		"objdump --start-address=%#0*" PRIx64 " --stop-address=%#0*" PRIx64 " -dS %s",
 		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->start),
 		BITS_PER_LONG / 4, map__rip_2objdump(map, sym->end), path);
 
@@ -308,7 +309,7 @@
 	struct source_line *line;
 	char pattern[PATTERN_LEN + 1];
 
-	sprintf(pattern, "%0*Lx <", BITS_PER_LONG / 4,
+	sprintf(pattern, "%0*" PRIx64 " <", BITS_PER_LONG / 4,
 		map__rip_2objdump(syme->map, symbol->start));
 
 	pthread_mutex_lock(&syme->src->lock);
@@ -537,7 +538,7 @@
 	if (nr_counters == 1 || !display_weighted) {
 		struct perf_evsel *first;
 		first = list_entry(evsel_list.next, struct perf_evsel, node);
-		printf("%Ld", first->attr.sample_period);
+		printf("%" PRIu64, (uint64_t)first->attr.sample_period);
 		if (freq)
 			printf("Hz ");
 		else
@@ -640,7 +641,7 @@
 
 		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
 		if (verbose)
-			printf(" %016llx", sym->start);
+			printf(" %016" PRIx64, sym->start);
 		printf(" %-*.*s", sym_width, sym_width, sym->name);
 		printf(" %-*.*s\n", dso_width, dso_width,
 		       dso_width >= syme->map->dso->long_name_len ?
@@ -1305,7 +1306,7 @@
 		return -ENOMEM;
 
 	if (target_tid != -1)
-		event__synthesize_thread(target_tid, event__process, session);
+		event__synthesize_thread_map(threads, event__process, session);
 	else
 		event__synthesize_threads(event__process, session);
 
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 2302ec0..50d0a93 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -263,11 +263,12 @@
 					     process, session);
 }
 
-int event__synthesize_thread(pid_t pid, event__handler_t process,
-			     struct perf_session *session)
+int event__synthesize_thread_map(struct thread_map *threads,
+				 event__handler_t process,
+				 struct perf_session *session)
 {
 	event_t *comm_event, *mmap_event;
-	int err = -1;
+	int err = -1, thread;
 
 	comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
 	if (comm_event == NULL)
@@ -277,8 +278,15 @@
 	if (mmap_event == NULL)
 		goto out_free_comm;
 
-	err = __event__synthesize_thread(comm_event, mmap_event, pid,
-					 process, session);
+	err = 0;
+	for (thread = 0; thread < threads->nr; ++thread) {
+		if (__event__synthesize_thread(comm_event, mmap_event,
+					       threads->map[thread],
+					       process, session)) {
+			err = -1;
+			break;
+		}
+	}
 	free(mmap_event);
 out_free_comm:
 	free(comm_event);
@@ -459,7 +467,8 @@
 int event__process_lost(event_t *self, struct sample_data *sample __used,
 			struct perf_session *session)
 {
-	dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
+	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
+		    self->lost.id, self->lost.lost);
 	session->hists.stats.total_lost += self->lost.lost;
 	return 0;
 }
@@ -575,7 +584,7 @@
 	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 	int ret = 0;
 
-	dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
+	dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
 			self->mmap.pid, self->mmap.tid, self->mmap.start,
 			self->mmap.len, self->mmap.pgoff, self->mmap.filename);
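Earlier in this file, event__synthesize_thread(pid, ...) becomes event__synthesize_thread_map(), which walks every entry of a thread map and stops at the first failure. A standalone sketch of that loop shape (the map type and callback below are stand-ins, not perf's own):

```c
#include <stdio.h>

struct thread_map { int nr; int map[8]; };

/* stand-in for synthesizing the comm/mmap events of one thread */
static int synthesize_one(int tid) { return tid >= 0 ? 0 : -1; }

static int synthesize_all(const struct thread_map *threads)
{
	int err = 0, i;

	for (i = 0; i < threads->nr; i++) {
		if (synthesize_one(threads->map[i])) {
			err = -1;	/* stop at the first failing thread */
			break;
		}
	}
	return err;
}

int main(void)
{
	struct thread_map t = { .nr = 3, .map = { 101, 102, 103 } };

	printf("%d\n", synthesize_all(&t));
	return 0;
}
```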
 
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 2b7e919..cc7b52f 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -135,14 +135,16 @@
 void event__print_totals(void);
 
 struct perf_session;
+struct thread_map;
 
 typedef int (*event__handler_synth_t)(event_t *event, 
 				      struct perf_session *session);
 typedef int (*event__handler_t)(event_t *event, struct sample_data *sample,
 				struct perf_session *session);
 
-int event__synthesize_thread(pid_t pid, event__handler_t process,
-			     struct perf_session *session);
+int event__synthesize_thread_map(struct thread_map *threads,
+				 event__handler_t process,
+				 struct perf_session *session);
 int event__synthesize_threads(event__handler_t process,
 			      struct perf_session *session);
 int event__synthesize_kernel_mmap(event__handler_t process,
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f5cfed6..d8575d3 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -90,7 +90,7 @@
 	int cpu, thread;
 	struct perf_counts_values *aggr = &evsel->counts->aggr, count;
 
-	aggr->val = 0;
+	aggr->val = aggr->ena = aggr->run = 0;
 
 	for (cpu = 0; cpu < ncpus; cpu++) {
 		for (thread = 0; thread < nthreads; thread++) {
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 989fa2d..f6a929e 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -798,8 +798,8 @@
 				      int feat, int fd)
 {
 	if (lseek(fd, self->offset, SEEK_SET) == (off_t)-1) {
-		pr_debug("Failed to lseek to %Ld offset for feature %d, "
-			 "continuing...\n", self->offset, feat);
+		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
+			  "%d, continuing...\n", self->offset, feat);
 		return 0;
 	}
 
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index c749ba6..32f4f1f 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -636,13 +636,13 @@
 			}
 		}
 	} else
-		ret = snprintf(s, size, sep ? "%lld" : "%12lld ", period);
+		ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);
 
 	if (symbol_conf.show_nr_samples) {
 		if (sep)
-			ret += snprintf(s + ret, size - ret, "%c%lld", *sep, period);
+			ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
 		else
-			ret += snprintf(s + ret, size - ret, "%11lld", period);
+			ret += snprintf(s + ret, size - ret, "%11" PRIu64, period);
 	}
 
 	if (pair_hists) {
@@ -971,7 +971,7 @@
 	sym_size = sym->end - sym->start;
 	offset = ip - sym->start;
 
-	pr_debug3("%s: ip=%#Lx\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));
+	pr_debug3("%s: ip=%#" PRIx64 "\n", __func__, self->ms.map->unmap_ip(self->ms.map, ip));
 
 	if (offset >= sym_size)
 		return 0;
@@ -980,8 +980,9 @@
 	h->sum++;
 	h->ip[offset]++;
 
-	pr_debug3("%#Lx %s: period++ [ip: %#Lx, %#Lx] => %Ld\n", self->ms.sym->start,
-		  self->ms.sym->name, ip, ip - self->ms.sym->start, h->ip[offset]);
+	pr_debug3("%#" PRIx64 " %s: period++ [ip: %#" PRIx64 ", %#" PRIx64
+		  "] => %" PRIu64 "\n", self->ms.sym->start, self->ms.sym->name,
+		  ip, ip - self->ms.sym->start, h->ip[offset]);
 	return 0;
 }
 
@@ -1132,7 +1133,7 @@
 		goto out_free_filename;
 	}
 
-	pr_debug("%s: filename=%s, sym=%s, start=%#Lx, end=%#Lx\n", __func__,
+	pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
 		 filename, sym->name, map->unmap_ip(map, sym->start),
 		 map->unmap_ip(map, sym->end));
 
@@ -1142,7 +1143,7 @@
 		 dso, dso->long_name, sym, sym->name);
 
 	snprintf(command, sizeof(command),
-		 "objdump --start-address=0x%016Lx --stop-address=0x%016Lx -dS -C %s|grep -v %s|expand",
+		 "objdump --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 " -dS -C %s|grep -v %s|expand",
 		 map__rip_2objdump(map, sym->start),
 		 map__rip_2objdump(map, sym->end),
 		 symfs_filename, filename);
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index 8be0b96..305c848 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -2,6 +2,7 @@
 #define _PERF_LINUX_BITOPS_H_
 
 #include <linux/kernel.h>
+#include <linux/compiler.h>
 #include <asm/hweight.h>
 
 #define BITS_PER_LONG __WORDSIZE
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 3a7eb6e..a16ecab 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -1,5 +1,6 @@
 #include "symbol.h"
 #include <errno.h>
+#include <inttypes.h>
 #include <limits.h>
 #include <stdlib.h>
 #include <string.h>
@@ -195,7 +196,7 @@
 
 size_t map__fprintf(struct map *self, FILE *fp)
 {
-	return fprintf(fp, " %Lx-%Lx %Lx %s\n",
+	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
 		       self->start, self->end, self->pgoff, self->dso->name);
 }
 
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index bc2732e..135f69b 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -279,7 +279,7 @@
 	static char buf[32];
 
 	if (type == PERF_TYPE_RAW) {
-		sprintf(buf, "raw 0x%llx", config);
+		sprintf(buf, "raw 0x%" PRIx64, config);
 		return buf;
 	}
 
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index b82cafb..458e3ec 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -23,7 +23,7 @@
 };
 
 extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
-extern bool have_tracepoints(struct list_head *evsel_list);
+extern bool have_tracepoints(struct list_head *evlist);
 
 extern int			nr_counters;
 
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 128aaab..6e29d9c 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -172,7 +172,7 @@
 	sym = __find_kernel_function_by_name(tp->symbol, &map);
 	if (sym) {
 		addr = map->unmap_ip(map, sym->start + tp->offset);
-		pr_debug("try to find %s+%ld@%llx\n", tp->symbol,
+		pr_debug("try to find %s+%ld@%" PRIx64 "\n", tp->symbol,
 			 tp->offset, addr);
 		ret = find_perf_probe_point((unsigned long)addr, pp);
 	}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 313dac2..105f00b 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -652,10 +652,11 @@
 {
 	unsigned int i;
 
-	printf("... chain: nr:%Lu\n", sample->callchain->nr);
+	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);
 
 	for (i = 0; i < sample->callchain->nr; i++)
-		printf("..... %2d: %016Lx\n", i, sample->callchain->ips[i]);
+		printf("..... %2d: %016" PRIx64 "\n",
+		       i, sample->callchain->ips[i]);
 }
 
 static void perf_session__print_tstamp(struct perf_session *session,
@@ -672,7 +673,7 @@
 		printf("%u ", sample->cpu);
 
 	if (session->sample_type & PERF_SAMPLE_TIME)
-		printf("%Lu ", sample->time);
+		printf("%" PRIu64 " ", sample->time);
 }
 
 static void dump_event(struct perf_session *session, event_t *event,
@@ -681,16 +682,16 @@
 	if (!dump_trace)
 		return;
 
-	printf("\n%#Lx [%#x]: event: %d\n", file_offset, event->header.size,
-	       event->header.type);
+	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
+	       file_offset, event->header.size, event->header.type);
 
 	trace_event(event);
 
 	if (sample)
 		perf_session__print_tstamp(session, event, sample);
 
-	printf("%#Lx [%#x]: PERF_RECORD_%s", file_offset, event->header.size,
-	       event__get_event_name(event->header.type));
+	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
+	       event->header.size, event__get_event_name(event->header.type));
 }
 
 static void dump_sample(struct perf_session *session, event_t *event,
@@ -699,8 +700,9 @@
 	if (!dump_trace)
 		return;
 
-	printf("(IP, %d): %d/%d: %#Lx period: %Ld\n", event->header.misc,
-	       sample->pid, sample->tid, sample->ip, sample->period);
+	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 "\n",
+	       event->header.misc, sample->pid, sample->tid, sample->ip,
+	       sample->period);
 
 	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
 		callchain__printf(sample);
@@ -843,8 +845,8 @@
 {
 	if (ops->lost == event__process_lost &&
 	    session->hists.stats.total_lost != 0) {
-		ui__warning("Processed %Lu events and LOST %Lu!\n\n"
-			    "Check IO/CPU overload!\n\n",
+		ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
+			    "!\n\nCheck IO/CPU overload!\n\n",
 			    session->hists.stats.total_period,
 			    session->hists.stats.total_lost);
 	}
@@ -918,7 +920,7 @@
 
 	if (size == 0 ||
 	    (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
-		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
+		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
 			    head, event.header.size, event.header.type);
 		/*
 		 * assume we lost track of the stream, check alignment, and
@@ -1023,7 +1025,7 @@
 
 	if (size == 0 ||
 	    perf_session__process_event(session, event, ops, file_pos) < 0) {
-		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
+		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
 			    file_offset + head, event->header.size,
 			    event->header.type);
 		/*
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index b3637db..fb737fe 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -12,6 +12,7 @@
  * of the License.
  */
 
+#include <inttypes.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <unistd.h>
@@ -43,11 +44,11 @@
 	return cpu2slot(cpu) * SLOT_MULT;
 }
 
-static double time2pixels(u64 time)
+static double time2pixels(u64 __time)
 {
 	double X;
 
-	X = 1.0 * svg_page_width * (time - first_time) / (last_time - first_time);
+	X = 1.0 * svg_page_width * (__time - first_time) / (last_time - first_time);
 	return X;
 }
 
@@ -94,7 +95,7 @@
 
 	total_height = (1 + rows + cpu2slot(cpus)) * SLOT_MULT;
 	fprintf(svgfile, "<?xml version=\"1.0\" standalone=\"no\"?> \n");
-	fprintf(svgfile, "<svg width=\"%i\" height=\"%llu\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
+	fprintf(svgfile, "<svg width=\"%i\" height=\"%" PRIu64 "\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
 
 	fprintf(svgfile, "<defs>\n  <style type=\"text/css\">\n    <![CDATA[\n");
 
@@ -483,7 +484,7 @@
 			color = 128;
 		}
 
-		fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%llu\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n",
+		fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%" PRIu64 "\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n",
 			time2pixels(i), SLOT_MULT/2, time2pixels(i), total_height, color, color, color, thickness);
 
 		i += 10000000;
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 15ccfba..7821d0e 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -11,6 +11,7 @@
 #include <sys/param.h>
 #include <fcntl.h>
 #include <unistd.h>
+#include <inttypes.h>
 #include "build-id.h"
 #include "debug.h"
 #include "symbol.h"
@@ -153,7 +154,7 @@
 	self->binding = binding;
 	self->namelen = namelen - 1;
 
-	pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end);
+	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", __func__, name, start, self->end);
 
 	memcpy(self->name, name, namelen);
 
@@ -167,7 +168,7 @@
 
 static size_t symbol__fprintf(struct symbol *self, FILE *fp)
 {
-	return fprintf(fp, " %llx-%llx %c %s\n",
+	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %c %s\n",
 		       self->start, self->end,
 		       self->binding == STB_GLOBAL ? 'g' :
 		       self->binding == STB_LOCAL  ? 'l' : 'w',
@@ -1161,6 +1162,13 @@
 
 		section_name = elf_sec__name(&shdr, secstrs);
 
+		/* On ARM, symbols for thumb functions have 1 added to
+		 * the symbol address as a flag - remove it */
+		if ((ehdr.e_machine == EM_ARM) &&
+		    (map->type == MAP__FUNCTION) &&
+		    (sym.st_value & 1))
+			--sym.st_value;
+
 		if (self->kernel != DSO_TYPE_USER || kmodule) {
 			char dso_name[PATH_MAX];
 
@@ -1208,8 +1216,8 @@
 		}
 
 		if (curr_dso->adjust_symbols) {
-			pr_debug4("%s: adjusting symbol: st_value: %#Lx "
-				  "sh_addr: %#Lx sh_offset: %#Lx\n", __func__,
+			pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
+				  "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
 				  (u64)sym.st_value, (u64)shdr.sh_addr,
 				  (u64)shdr.sh_offset);
 			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
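The new ARM block in the first symbol.c hunk relies on the ARM ELF convention that a Thumb function symbol carries its instruction-set flag in bit 0 of st_value; the real, halfword-aligned entry address is the value with that bit cleared, which is what the `--sym.st_value` achieves when the bit is set. A minimal standalone sketch of the same masking (the helper name and test value below are illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* On ARM, bit 0 of a function symbol's value flags Thumb code;
 * clearing it recovers the actual entry address. */
static uint64_t arm_function_addr(uint64_t st_value)
{
	return (st_value & 1) ? st_value & ~(uint64_t)1 : st_value;
}

int main(void)
{
	/* 0x8001 is a Thumb entry point; the code really starts at 0x8000. */
	printf("%#llx\n", (unsigned long long)arm_function_addr(0x8001));
	return 0;
}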
diff --git a/tools/perf/util/types.h b/tools/perf/util/types.h
index 7d6b833..5f3689a 100644
--- a/tools/perf/util/types.h
+++ b/tools/perf/util/types.h
@@ -1,12 +1,14 @@
 #ifndef __PERF_TYPES_H
 #define __PERF_TYPES_H
 
+#include <stdint.h>
+
 /*
- * We define u64 as unsigned long long for every architecture
- * so that we can print it with %Lx without getting warnings.
+ * We define u64 as uint64_t for every architecture
+ * so that we can print it with "%"PRIx64 without getting warnings.
  */
-typedef unsigned long long u64;
-typedef signed long long   s64;
+typedef uint64_t	   u64;
+typedef int64_t		   s64;
 typedef unsigned int	   u32;
 typedef signed int	   s32;
 typedef unsigned short	   u16;
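The rewritten comment above states the motivation for the whole series of format-string hunks: with u64 defined as uint64_t, the underlying type is unsigned long on 64-bit glibc targets and unsigned long long on 32-bit ones, so a hard-coded %Lx or %llx length modifier triggers -Wformat warnings on at least one of the two. The <inttypes.h> PRIx64/PRIu64 macros expand to whichever modifier the platform needs. A minimal sketch of the idiom, independent of the perf sources:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ip = 0xffffffff81000000ULL;

	/* PRIx64 expands to "lx" or "llx" as appropriate, so both lines
	 * compile warning-free with -Wformat on 32- and 64-bit hosts. */
	printf("%#" PRIx64 "\n", ip);
	printf("%016" PRIx64 "\n", ip);
	return 0;
}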
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index ebda8c3..60c463c 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -350,7 +350,7 @@
 	if (self->ms.sym)
 		return self->ms.sym->name;
 
-	snprintf(bf, bfsize, "%#Lx", self->ip);
+	snprintf(bf, bfsize, "%#" PRIx64, self->ip);
 	return bf;
 }
 
diff --git a/tools/perf/util/ui/browsers/map.c b/tools/perf/util/ui/browsers/map.c
index e35437d..e515836 100644
--- a/tools/perf/util/ui/browsers/map.c
+++ b/tools/perf/util/ui/browsers/map.c
@@ -1,5 +1,6 @@
 #include "../libslang.h"
 #include <elf.h>
+#include <inttypes.h>
 #include <sys/ttydefaults.h>
 #include <ctype.h>
 #include <string.h>
@@ -57,7 +58,7 @@
 	int width;
 
 	ui_browser__set_percent_color(self, 0, current_entry);
-	slsmg_printf("%*llx %*llx %c ",
+	slsmg_printf("%*" PRIx64 " %*" PRIx64 " %c ",
 		     mb->addrlen, sym->start, mb->addrlen, sym->end,
 		     sym->binding == STB_GLOBAL ? 'g' :
 		     sym->binding == STB_LOCAL  ? 'l' : 'w');
@@ -150,6 +151,6 @@
 		++mb.b.nr_entries;
 	}
 
-	mb.addrlen = snprintf(tmp, sizeof(tmp), "%llx", maxaddr);
+	mb.addrlen = snprintf(tmp, sizeof(tmp), "%" PRIx64, maxaddr);
 	return map_browser__run(&mb);
 }
diff --git a/tools/perf/util/values.c b/tools/perf/util/values.c
index cfa55d6..bdd3347 100644
--- a/tools/perf/util/values.c
+++ b/tools/perf/util/values.c
@@ -150,7 +150,7 @@
 		if (width > tidwidth)
 			tidwidth = width;
 		for (j = 0; j < values->counters; j++) {
-			width = snprintf(NULL, 0, "%Lu", values->value[i][j]);
+			width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]);
 			if (width > counterwidth[j])
 				counterwidth[j] = width;
 		}
@@ -165,7 +165,7 @@
 		fprintf(fp, "  %*d  %*d", pidwidth, values->pid[i],
 			tidwidth, values->tid[i]);
 		for (j = 0; j < values->counters; j++)
-			fprintf(fp, "  %*Lu",
+			fprintf(fp, "  %*" PRIu64,
 				counterwidth[j], values->value[i][j]);
 		fprintf(fp, "\n");
 	}
@@ -196,13 +196,13 @@
 		width = strlen(values->countername[j]);
 		if (width > namewidth)
 			namewidth = width;
-		width = snprintf(NULL, 0, "%llx", values->counterrawid[j]);
+		width = snprintf(NULL, 0, "%" PRIx64, values->counterrawid[j]);
 		if (width > rawwidth)
 			rawwidth = width;
 	}
 	for (i = 0; i < values->threads; i++) {
 		for (j = 0; j < values->counters; j++) {
-			width = snprintf(NULL, 0, "%Lu", values->value[i][j]);
+			width = snprintf(NULL, 0, "%" PRIu64, values->value[i][j]);
 			if (width > countwidth)
 				countwidth = width;
 		}
@@ -214,7 +214,7 @@
 		countwidth, "Count");
 	for (i = 0; i < values->threads; i++)
 		for (j = 0; j < values->counters; j++)
-			fprintf(fp, "  %*d  %*d  %*s  %*llx  %*Lu\n",
+			fprintf(fp, "  %*d  %*d  %*s  %*" PRIx64 "  %*" PRIu64 "\n",
 				pidwidth, values->pid[i],
 				tidwidth, values->tid[i],
 				namewidth, values->countername[j],
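The values.c hunks keep reusing a width-measuring idiom worth spelling out: per C99, snprintf with a NULL buffer and a size of 0 writes nothing but still returns the number of characters the formatted value would occupy, and that return value is what sizes the pidwidth/tidwidth/counterwidth columns before anything is printed. A small self-contained sketch of the idiom:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t value = 123456789ULL;

	/* "Dry run": snprintf returns the length the output would have,
	 * without writing to any buffer. */
	int width = snprintf(NULL, 0, "%" PRIu64, value);

	/* Use the measured width as a field width when really printing. */
	printf("need %d columns: |%*" PRIu64 "|\n", width, width, value);
	return 0;
}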
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 4c6983d..362a0cb 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -72,7 +72,7 @@
 
 int num_cpus;
 
-typedef struct per_cpu_counters {
+struct counters {
 	unsigned long long tsc;		/* per thread */
 	unsigned long long aperf;	/* per thread */
 	unsigned long long mperf;	/* per thread */
@@ -88,13 +88,13 @@
 	int pkg;
 	int core;
 	int cpu;
-	struct per_cpu_counters *next;
-} PCC;
+	struct counters *next;
+};
 
-PCC *pcc_even;
-PCC *pcc_odd;
-PCC *pcc_delta;
-PCC *pcc_average;
+struct counters *cnt_even;
+struct counters *cnt_odd;
+struct counters *cnt_delta;
+struct counters *cnt_average;
 struct timeval tv_even;
 struct timeval tv_odd;
 struct timeval tv_delta;
@@ -125,7 +125,7 @@
 	return msr;
 }
 
-void print_header()
+void print_header(void)
 {
 	if (show_pkg)
 		fprintf(stderr, "pkg ");
@@ -160,39 +160,39 @@
 	putc('\n', stderr);
 }
 
-void dump_pcc(PCC *pcc)
+void dump_cnt(struct counters *cnt)
 {
-	fprintf(stderr, "package: %d ", pcc->pkg);
-	fprintf(stderr, "core:: %d ", pcc->core);
-	fprintf(stderr, "CPU: %d ", pcc->cpu);
-	fprintf(stderr, "TSC: %016llX\n", pcc->tsc);
-	fprintf(stderr, "c3: %016llX\n", pcc->c3);
-	fprintf(stderr, "c6: %016llX\n", pcc->c6);
-	fprintf(stderr, "c7: %016llX\n", pcc->c7);
-	fprintf(stderr, "aperf: %016llX\n", pcc->aperf);
-	fprintf(stderr, "pc2: %016llX\n", pcc->pc2);
-	fprintf(stderr, "pc3: %016llX\n", pcc->pc3);
-	fprintf(stderr, "pc6: %016llX\n", pcc->pc6);
-	fprintf(stderr, "pc7: %016llX\n", pcc->pc7);
-	fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, pcc->extra_msr);
+	fprintf(stderr, "package: %d ", cnt->pkg);
+	fprintf(stderr, "core:: %d ", cnt->core);
+	fprintf(stderr, "CPU: %d ", cnt->cpu);
+	fprintf(stderr, "TSC: %016llX\n", cnt->tsc);
+	fprintf(stderr, "c3: %016llX\n", cnt->c3);
+	fprintf(stderr, "c6: %016llX\n", cnt->c6);
+	fprintf(stderr, "c7: %016llX\n", cnt->c7);
+	fprintf(stderr, "aperf: %016llX\n", cnt->aperf);
+	fprintf(stderr, "pc2: %016llX\n", cnt->pc2);
+	fprintf(stderr, "pc3: %016llX\n", cnt->pc3);
+	fprintf(stderr, "pc6: %016llX\n", cnt->pc6);
+	fprintf(stderr, "pc7: %016llX\n", cnt->pc7);
+	fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr);
 }
 
-void dump_list(PCC *pcc)
+void dump_list(struct counters *cnt)
 {
-	printf("dump_list 0x%p\n", pcc);
+	printf("dump_list 0x%p\n", cnt);
 
-	for (; pcc; pcc = pcc->next)
-		dump_pcc(pcc);
+	for (; cnt; cnt = cnt->next)
+		dump_cnt(cnt);
 }
 
-void print_pcc(PCC *p)
+void print_cnt(struct counters *p)
 {
 	double interval_float;
 
 	interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
 
 	/* topology columns, print blanks on 1st (average) line */
-	if (p == pcc_average) {
+	if (p == cnt_average) {
 		if (show_pkg)
 			fprintf(stderr, "    ");
 		if (show_core)
@@ -262,24 +262,24 @@
 	putc('\n', stderr);
 }
 
-void print_counters(PCC *cnt)
+void print_counters(struct counters *counters)
 {
-	PCC *pcc;
+	struct counters *cnt;
 
 	print_header();
 
 	if (num_cpus > 1)
-		print_pcc(pcc_average);
+		print_cnt(cnt_average);
 
-	for (pcc = cnt; pcc != NULL; pcc = pcc->next)
-		print_pcc(pcc);
+	for (cnt = counters; cnt != NULL; cnt = cnt->next)
+		print_cnt(cnt);
 
 }
 
 #define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after))
 
-
-int compute_delta(PCC *after, PCC *before, PCC *delta)
+int compute_delta(struct counters *after,
+	struct counters *before, struct counters *delta)
 {
 	int errors = 0;
 	int perf_err = 0;
@@ -391,20 +391,20 @@
 		delta->extra_msr = after->extra_msr;
 		if (errors) {
 			fprintf(stderr, "ERROR cpu%d before:\n", before->cpu);
-			dump_pcc(before);
+			dump_cnt(before);
 			fprintf(stderr, "ERROR cpu%d after:\n", before->cpu);
-			dump_pcc(after);
+			dump_cnt(after);
 			errors = 0;
 		}
 	}
 	return 0;
 }
 
-void compute_average(PCC *delta, PCC *avg)
+void compute_average(struct counters *delta, struct counters *avg)
 {
-	PCC *sum;
+	struct counters *sum;
 
-	sum = calloc(1, sizeof(PCC));
+	sum = calloc(1, sizeof(struct counters));
 	if (sum == NULL) {
 		perror("calloc sum");
 		exit(1);
@@ -438,35 +438,34 @@
 	free(sum);
 }
 
-void get_counters(PCC *pcc)
+void get_counters(struct counters *cnt)
 {
-	for ( ; pcc; pcc = pcc->next) {
-		pcc->tsc = get_msr(pcc->cpu, MSR_TSC);
+	for ( ; cnt; cnt = cnt->next) {
+		cnt->tsc = get_msr(cnt->cpu, MSR_TSC);
 		if (do_nhm_cstates)
-			pcc->c3 = get_msr(pcc->cpu, MSR_CORE_C3_RESIDENCY);
+			cnt->c3 = get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY);
 		if (do_nhm_cstates)
-			pcc->c6 = get_msr(pcc->cpu, MSR_CORE_C6_RESIDENCY);
+			cnt->c6 = get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY);
 		if (do_snb_cstates)
-			pcc->c7 = get_msr(pcc->cpu, MSR_CORE_C7_RESIDENCY);
+			cnt->c7 = get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY);
 		if (has_aperf)
-			pcc->aperf = get_msr(pcc->cpu, MSR_APERF);
+			cnt->aperf = get_msr(cnt->cpu, MSR_APERF);
 		if (has_aperf)
-			pcc->mperf = get_msr(pcc->cpu, MSR_MPERF);
+			cnt->mperf = get_msr(cnt->cpu, MSR_MPERF);
 		if (do_snb_cstates)
-			pcc->pc2 = get_msr(pcc->cpu, MSR_PKG_C2_RESIDENCY);
+			cnt->pc2 = get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY);
 		if (do_nhm_cstates)
-			pcc->pc3 = get_msr(pcc->cpu, MSR_PKG_C3_RESIDENCY);
+			cnt->pc3 = get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY);
 		if (do_nhm_cstates)
-			pcc->pc6 = get_msr(pcc->cpu, MSR_PKG_C6_RESIDENCY);
+			cnt->pc6 = get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY);
 		if (do_snb_cstates)
-			pcc->pc7 = get_msr(pcc->cpu, MSR_PKG_C7_RESIDENCY);
+			cnt->pc7 = get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY);
 		if (extra_msr_offset)
-			pcc->extra_msr = get_msr(pcc->cpu, extra_msr_offset);
+			cnt->extra_msr = get_msr(cnt->cpu, extra_msr_offset);
 	}
 }
 
-
-void print_nehalem_info()
+void print_nehalem_info(void)
 {
 	unsigned long long msr;
 	unsigned int ratio;
@@ -514,38 +513,38 @@
 
 }
 
-void free_counter_list(PCC *list)
+void free_counter_list(struct counters *list)
 {
-	PCC *p;
+	struct counters *p;
 
 	for (p = list; p; ) {
-		PCC *free_me;
+		struct counters *free_me;
 
 		free_me = p;
 		p = p->next;
 		free(free_me);
 	}
-	return;
 }
 
 void free_all_counters(void)
 {
-	free_counter_list(pcc_even);
-	pcc_even = NULL;
+	free_counter_list(cnt_even);
+	cnt_even = NULL;
 
-	free_counter_list(pcc_odd);
-	pcc_odd = NULL;
+	free_counter_list(cnt_odd);
+	cnt_odd = NULL;
 
-	free_counter_list(pcc_delta);
-	pcc_delta = NULL;
+	free_counter_list(cnt_delta);
+	cnt_delta = NULL;
 
-	free_counter_list(pcc_average);
-	pcc_average = NULL;
+	free_counter_list(cnt_average);
+	cnt_average = NULL;
 }
 
-void insert_cpu_counters(PCC **list, PCC *new)
+void insert_counters(struct counters **list,
+	struct counters *new)
 {
-	PCC *prev;
+	struct counters *prev;
 
 	/*
 	 * list was empty
@@ -594,18 +593,16 @@
 	 */
 	new->next = prev->next;
 	prev->next = new;
-
-	return;
 }
 
-void alloc_new_cpu_counters(int pkg, int core, int cpu)
+void alloc_new_counters(int pkg, int core, int cpu)
 {
-	PCC *new;
+	struct counters *new;
 
 	if (verbose > 1)
 		printf("pkg%d core%d, cpu%d\n", pkg, core, cpu);
 
-	new = (PCC *)calloc(1, sizeof(PCC));
+	new = (struct counters *)calloc(1, sizeof(struct counters));
 	if (new == NULL) {
 		perror("calloc");
 		exit(1);
@@ -613,9 +610,10 @@
 	new->pkg = pkg;
 	new->core = core;
 	new->cpu = cpu;
-	insert_cpu_counters(&pcc_odd, new);
+	insert_counters(&cnt_odd, new);
 
-	new = (PCC *)calloc(1, sizeof(PCC));
+	new = (struct counters *)calloc(1,
+		sizeof(struct counters));
 	if (new == NULL) {
 		perror("calloc");
 		exit(1);
@@ -623,9 +621,9 @@
 	new->pkg = pkg;
 	new->core = core;
 	new->cpu = cpu;
-	insert_cpu_counters(&pcc_even, new);
+	insert_counters(&cnt_even, new);
 
-	new = (PCC *)calloc(1, sizeof(PCC));
+	new = (struct counters *)calloc(1, sizeof(struct counters));
 	if (new == NULL) {
 		perror("calloc");
 		exit(1);
@@ -633,9 +631,9 @@
 	new->pkg = pkg;
 	new->core = core;
 	new->cpu = cpu;
-	insert_cpu_counters(&pcc_delta, new);
+	insert_counters(&cnt_delta, new);
 
-	new = (PCC *)calloc(1, sizeof(PCC));
+	new = (struct counters *)calloc(1, sizeof(struct counters));
 	if (new == NULL) {
 		perror("calloc");
 		exit(1);
@@ -643,7 +641,7 @@
 	new->pkg = pkg;
 	new->core = core;
 	new->cpu = cpu;
-	pcc_average = new;
+	cnt_average = new;
 }
 
 int get_physical_package_id(int cpu)
@@ -719,7 +717,7 @@
 {
 	printf("turbostat: topology changed, re-initializing.\n");
 	free_all_counters();
-	num_cpus = for_all_cpus(alloc_new_cpu_counters);
+	num_cpus = for_all_cpus(alloc_new_counters);
 	need_reinitialize = 0;
 	printf("num_cpus is now %d\n", num_cpus);
 }
@@ -728,7 +726,7 @@
 /*
  * check to see if a cpu came on-line
  */
-void verify_num_cpus()
+void verify_num_cpus(void)
 {
 	int new_num_cpus;
 
@@ -740,14 +738,12 @@
 				num_cpus, new_num_cpus);
 		need_reinitialize = 1;
 	}
-
-	return;
 }
 
 void turbostat_loop()
 {
 restart:
-	get_counters(pcc_even);
+	get_counters(cnt_even);
 	gettimeofday(&tv_even, (struct timezone *)NULL);
 
 	while (1) {
@@ -757,24 +753,24 @@
 			goto restart;
 		}
 		sleep(interval_sec);
-		get_counters(pcc_odd);
+		get_counters(cnt_odd);
 		gettimeofday(&tv_odd, (struct timezone *)NULL);
 
-		compute_delta(pcc_odd, pcc_even, pcc_delta);
+		compute_delta(cnt_odd, cnt_even, cnt_delta);
 		timersub(&tv_odd, &tv_even, &tv_delta);
-		compute_average(pcc_delta, pcc_average);
-		print_counters(pcc_delta);
+		compute_average(cnt_delta, cnt_average);
+		print_counters(cnt_delta);
 		if (need_reinitialize) {
 			re_initialize();
 			goto restart;
 		}
 		sleep(interval_sec);
-		get_counters(pcc_even);
+		get_counters(cnt_even);
 		gettimeofday(&tv_even, (struct timezone *)NULL);
-		compute_delta(pcc_even, pcc_odd, pcc_delta);
+		compute_delta(cnt_even, cnt_odd, cnt_delta);
 		timersub(&tv_even, &tv_odd, &tv_delta);
-		compute_average(pcc_delta, pcc_average);
-		print_counters(pcc_delta);
+		compute_average(cnt_delta, cnt_average);
+		print_counters(cnt_delta);
 	}
 }
 
@@ -892,7 +888,7 @@
 	 * this check is valid for both Intel and AMD
 	 */
 	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
-	has_invariant_tsc = edx && (1 << 8);
+	has_invariant_tsc = edx & (1 << 8);
 
 	if (!has_invariant_tsc) {
 		fprintf(stderr, "No invariant TSC\n");
@@ -905,7 +901,7 @@
 	 */
 
 	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
-	has_aperf = ecx && (1 << 0);
+	has_aperf = ecx & (1 << 0);
 	if (!has_aperf) {
 		fprintf(stderr, "No APERF MSR\n");
 		exit(1);
@@ -952,7 +948,7 @@
 	check_dev_msr();
 	check_super_user();
 
-	num_cpus = for_all_cpus(alloc_new_cpu_counters);
+	num_cpus = for_all_cpus(alloc_new_counters);
 
 	if (verbose)
 		print_nehalem_info();
@@ -962,7 +958,7 @@
 {
 	int retval;
 	pid_t child_pid;
-	get_counters(pcc_even);
+	get_counters(cnt_even);
 	gettimeofday(&tv_even, (struct timezone *)NULL);
 
 	child_pid = fork();
@@ -985,14 +981,14 @@
 			exit(1);
 		}
 	}
-	get_counters(pcc_odd);
+	get_counters(cnt_odd);
 	gettimeofday(&tv_odd, (struct timezone *)NULL);
-	retval = compute_delta(pcc_odd, pcc_even, pcc_delta);
+	retval = compute_delta(cnt_odd, cnt_even, cnt_delta);
 
 	timersub(&tv_odd, &tv_even, &tv_delta);
-	compute_average(pcc_delta, pcc_average);
+	compute_average(cnt_delta, cnt_average);
 	if (!retval)
-		print_counters(pcc_delta);
+		print_counters(cnt_delta);
 
 	fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);;
 
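Buried in the turbostat rename are two genuine bug fixes: `edx && (1 << 8)` and `ecx && (1 << 0)` used the logical AND, which evaluates to 1 whenever both operands are non-zero, so the invariant-TSC and APERF checks passed for essentially any non-zero CPUID result; the replacement `&` actually isolates the feature bit. A short sketch of the difference (the sample register value is made up):

#include <stdio.h>

int main(void)
{
	unsigned int edx = 0x00000027;	/* bit 8 (invariant TSC) not set */

	/* Logical AND: both operands are non-zero, so the result is 1 -- wrong. */
	printf("edx && (1 << 8) -> %d\n", edx && (1 << 8));

	/* Bitwise AND: tests bit 8 itself, so the result is 0 -- correct. */
	printf("edx &  (1 << 8) -> %d\n", (edx & (1 << 8)) != 0);
	return 0;
}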
diff --git a/usr/Kconfig b/usr/Kconfig
index 4780dea..65b845b 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -46,7 +46,7 @@
 	  If you are not sure, leave it set to "0".
 
 config RD_GZIP
-	bool "Support initial ramdisks compressed using gzip" if EMBEDDED
+	bool "Support initial ramdisks compressed using gzip" if EXPERT
 	default y
 	depends on BLK_DEV_INITRD
 	select DECOMPRESS_GZIP
@@ -55,8 +55,8 @@
 	  If unsure, say Y.
 
 config RD_BZIP2
-	bool "Support initial ramdisks compressed using bzip2" if EMBEDDED
-	default !EMBEDDED
+	bool "Support initial ramdisks compressed using bzip2" if EXPERT
+	default !EXPERT
 	depends on BLK_DEV_INITRD
 	select DECOMPRESS_BZIP2
 	help
@@ -64,8 +64,8 @@
 	  If unsure, say N.
 
 config RD_LZMA
-	bool "Support initial ramdisks compressed using LZMA" if EMBEDDED
-	default !EMBEDDED
+	bool "Support initial ramdisks compressed using LZMA" if EXPERT
+	default !EXPERT
 	depends on BLK_DEV_INITRD
 	select DECOMPRESS_LZMA
 	help
@@ -73,8 +73,8 @@
 	  If unsure, say N.
 
 config RD_XZ
-	bool "Support initial ramdisks compressed using XZ" if EMBEDDED
-	default !EMBEDDED
+	bool "Support initial ramdisks compressed using XZ" if EXPERT
+	default !EXPERT
 	depends on BLK_DEV_INITRD
 	select DECOMPRESS_XZ
 	help
@@ -82,8 +82,8 @@
 	  If unsure, say N.
 
 config RD_LZO
-	bool "Support initial ramdisks compressed using LZO" if EMBEDDED
-	default !EMBEDDED
+	bool "Support initial ramdisks compressed using LZO" if EXPERT
+	default !EXPERT
 	depends on BLK_DEV_INITRD
 	select DECOMPRESS_LZO
 	help